perf regs: Remove __weak attribute from arch__xxx_reg_mask() functions

Currently, some architecture-specific perf-regs functions, such as
arch__intr_reg_mask() and arch__user_reg_mask(), are defined with the
__weak attribute.

This approach ensures that only functions matching the architecture of
the build/run host are compiled and executed, reducing build time and
binary size.

However, this __weak attribute restricts these functions to be called
only on the same architecture, preventing cross-architecture
functionality.

For example, a perf.data file captured on x86 cannot be parsed on an ARM
platform.

To address this limitation, this patch removes the __weak attribute from
these perf-regs functions.

The architecture-specific code is moved from the arch/ directory to the
util/perf-regs-arch/ directory.

The appropriate architecture-specific functions are then called based on
the EM_HOST ELF machine type.

No functional changes are intended.

Suggested-by: Ian Rogers <irogers@google.com>
Reviewed-by: Ian Rogers <irogers@google.com>
Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexandre Ghiti <alex@ghiti.fr>
Cc: Guo Ren <guoren@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@linaro.org>
Cc: John Garry <john.g.garry@oracle.com>
Cc: Mike Leach <mike.leach@linaro.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Walmsley <pjw@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Falcon <thomas.falcon@intel.com>
Cc: Will Deacon <will@kernel.org>
Cc: Xudong Hao <xudong.hao@intel.com>
Cc: Zide Chen <zide.chen@intel.com>
[ Fixed up some fuzz with s390 and riscv Build files wrt removing perf_regs.o ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
This commit is contained in:
Dapeng Mi 2026-02-03 10:43:55 +08:00 committed by Arnaldo Carvalho de Melo
parent e716e69cf6
commit 16dccbb842
30 changed files with 332 additions and 236 deletions

View File

@ -1,5 +1,3 @@
perf-util-y += perf_regs.o
perf-util-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
perf-util-y += pmu.o auxtrace.o cs-etm.o

View File

@ -1,13 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
#include "perf_regs.h"
#include "../../../util/perf_regs.h"
/* arm: interrupt sampling uses the base PERF_REGS_MASK, no extensions. */
uint64_t arch__intr_reg_mask(void)
{
return PERF_REGS_MASK;
}
/* arm: user sampling uses the same base mask; no runtime probing needed. */
uint64_t arch__user_reg_mask(void)
{
return PERF_REGS_MASK;
}

View File

@ -103,39 +103,3 @@ int arch_sdt_arg_parse_op(char *old_op, char **new_op)
return SDT_ARG_VALID;
}
/* arm64: interrupt sampling offers only the base register set. */
uint64_t arch__intr_reg_mask(void)
{
return PERF_REGS_MASK;
}
/*
 * arm64: user sampling starts from the base mask and, when the CPU
 * advertises SVE via AT_HWCAP, additionally requests the VG register.
 * The extended mask is only returned if a probe event accepting it can
 * actually be opened, i.e. the running kernel supports it.
 */
uint64_t arch__user_reg_mask(void)
{
struct perf_event_attr attr = {
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES,
.sample_type = PERF_SAMPLE_REGS_USER,
.disabled = 1,
.exclude_kernel = 1,
.sample_period = 1,
.sample_regs_user = PERF_REGS_MASK
};
int fd;
if (getauxval(AT_HWCAP) & HWCAP_SVE)
attr.sample_regs_user |= SMPL_REG_MASK(PERF_REG_ARM64_VG);
/*
* Check if the pmu supports perf extended regs, before
* returning the register mask to sample.
*/
if (attr.sample_regs_user != PERF_REGS_MASK) {
event_attr_init(&attr);
fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
if (fd != -1) {
close(fd);
return attr.sample_regs_user;
}
}
return PERF_REGS_MASK;
}

View File

@ -1 +0,0 @@
perf-util-y += util/

View File

@ -1 +0,0 @@
perf-util-y += perf_regs.o

View File

@ -1,13 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
#include "perf_regs.h"
#include "../../util/perf_regs.h"
/* Interrupt sampling: the base PERF_REGS_MASK, no arch-specific extras. */
uint64_t arch__intr_reg_mask(void)
{
return PERF_REGS_MASK;
}
/* User sampling: same base mask; no runtime probing required. */
uint64_t arch__user_reg_mask(void)
{
return PERF_REGS_MASK;
}

View File

@ -1,5 +1,4 @@
perf-util-y += header.o
perf-util-y += perf_regs.o
perf-util-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
perf-util-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o

View File

@ -1,13 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
#include "perf_regs.h"
#include "../../../util/perf_regs.h"
/* Interrupt sampling: the base PERF_REGS_MASK, no arch-specific extras. */
uint64_t arch__intr_reg_mask(void)
{
return PERF_REGS_MASK;
}
/* User sampling: same base mask; no runtime probing required. */
uint64_t arch__user_reg_mask(void)
{
return PERF_REGS_MASK;
}

View File

@ -1,2 +1 @@
perf-util-y += perf_regs.o
perf-util-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o

View File

@ -1,13 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
#include "perf_regs.h"
#include "../../util/perf_regs.h"
/* Interrupt sampling: the base PERF_REGS_MASK, no arch-specific extras. */
uint64_t arch__intr_reg_mask(void)
{
return PERF_REGS_MASK;
}
/* User sampling: same base mask; no runtime probing required. */
uint64_t arch__user_reg_mask(void)
{
return PERF_REGS_MASK;
}

View File

@ -123,50 +123,3 @@ int arch_sdt_arg_parse_op(char *old_op, char **new_op)
return SDT_ARG_VALID;
}
/*
 * powerpc: interrupt sampling may expose an extended register set.
 * Read the PVR to select the platform-specific extended mask (POWER9 ->
 * PMU_MASK_300, POWER10/POWER11 -> PMU_MASK_31), then only report it if
 * the running kernel accepts it, checked by opening a probe event.
 */
uint64_t arch__intr_reg_mask(void)
{
struct perf_event_attr attr = {
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES,
.sample_type = PERF_SAMPLE_REGS_INTR,
.precise_ip = 1,
.disabled = 1,
.exclude_kernel = 1,
};
int fd;
u32 version;
u64 extended_mask = 0, mask = PERF_REGS_MASK;
/*
* Get the PVR value to set the extended
* mask specific to platform.
*/
version = (((mfspr(SPRN_PVR)) >> 16) & 0xFFFF);
if (version == PVR_POWER9)
extended_mask = PERF_REG_PMU_MASK_300;
else if ((version == PVR_POWER10) || (version == PVR_POWER11))
extended_mask = PERF_REG_PMU_MASK_31;
else
return mask;
attr.sample_regs_intr = extended_mask;
attr.sample_period = 1;
event_attr_init(&attr);
/*
* check if the pmu supports perf extended regs, before
* returning the register mask to sample.
*/
fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
if (fd != -1) {
close(fd);
mask |= extended_mask;
}
return mask;
}
/* powerpc: user sampling is limited to the base register set. */
uint64_t arch__user_reg_mask(void)
{
return PERF_REGS_MASK;
}

View File

@ -10,10 +10,15 @@
#define PERF_REGS_MASK ((1ULL << PERF_REG_RISCV_MAX) - 1)
#define PERF_REGS_MAX PERF_REG_RISCV_MAX
#if defined(__riscv_xlen)
#if __riscv_xlen == 64
#define PERF_SAMPLE_REGS_ABI PERF_SAMPLE_REGS_ABI_64
#define PERF_SAMPLE_REGS_ABI PERF_SAMPLE_REGS_ABI_64
#else
#define PERF_SAMPLE_REGS_ABI PERF_SAMPLE_REGS_ABI_32
#endif
#else
#define PERF_SAMPLE_REGS_ABI PERF_SAMPLE_REGS_NONE
#endif
#endif /* ARCH_PERF_REGS_H */

View File

@ -1,2 +1 @@
perf-util-y += perf_regs.o
perf-util-y += header.o

View File

@ -1,13 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
#include "perf_regs.h"
#include "../../util/perf_regs.h"
uint64_t arch__intr_reg_mask(void)
{
return PERF_REGS_MASK;
}
uint64_t arch__user_reg_mask(void)
{
return PERF_REGS_MASK;
}

View File

@ -1,5 +1,4 @@
perf-util-y += header.o
perf-util-y += perf_regs.o
perf-util-y += machine.o
perf-util-y += pmu.o

View File

@ -1,13 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
#include "perf_regs.h"
#include "../../util/perf_regs.h"
uint64_t arch__intr_reg_mask(void)
{
return PERF_REGS_MASK;
}
uint64_t arch__user_reg_mask(void)
{
return PERF_REGS_MASK;
}

View File

@ -233,51 +233,3 @@ int arch_sdt_arg_parse_op(char *old_op, char **new_op)
return SDT_ARG_VALID;
}
/*
 * x86: interrupt sampling may expose extended registers
 * (PERF_REG_EXTENDED_MASK). Their availability is verified by opening a
 * probe event that requests them; on success the extended mask is ORed
 * into the base mask, otherwise only the base mask is reported.
 */
uint64_t arch__intr_reg_mask(void)
{
struct perf_event_attr attr = {
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES,
.sample_type = PERF_SAMPLE_REGS_INTR,
.sample_regs_intr = PERF_REG_EXTENDED_MASK,
.precise_ip = 1,
.disabled = 1,
.exclude_kernel = 1,
};
int fd;
/*
* In an unnamed union, init it here to build on older gcc versions
*/
attr.sample_period = 1;
/* Hybrid systems: encode the first core PMU's type into the config. */
if (perf_pmus__num_core_pmus() > 1) {
struct perf_pmu *pmu = NULL;
__u64 type = PERF_TYPE_RAW;
/*
* The same register set is supported among different hybrid PMUs.
* Only check the first available one.
*/
while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
type = pmu->type;
break;
}
attr.config |= type << PERF_PMU_TYPE_SHIFT;
}
event_attr_init(&attr);
fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
if (fd != -1) {
close(fd);
return (PERF_REG_EXTENDED_MASK | PERF_REGS_MASK);
}
return PERF_REGS_MASK;
}
/* x86: user sampling is limited to the base register set. */
uint64_t arch__user_reg_mask(void)
{
return PERF_REGS_MASK;
}

View File

@ -1055,13 +1055,13 @@ static void __evsel__config_callchain(struct evsel *evsel, struct record_opts *o
evsel__set_sample_bit(evsel, REGS_USER);
evsel__set_sample_bit(evsel, STACK_USER);
if (opts->sample_user_regs &&
DWARF_MINIMAL_REGS(e_machine) != arch__user_reg_mask()) {
DWARF_MINIMAL_REGS(e_machine) != perf_user_reg_mask(EM_HOST)) {
attr->sample_regs_user |= DWARF_MINIMAL_REGS(e_machine);
pr_warning("WARNING: The use of --call-graph=dwarf may require all the user registers, "
"specifying a subset with --user-regs may render DWARF unwinding unreliable, "
"so the minimal registers set (IP, SP) is explicitly forced.\n");
} else {
attr->sample_regs_user |= arch__user_reg_mask();
attr->sample_regs_user |= perf_user_reg_mask(EM_HOST);
}
attr->sample_stack_user = param->dump_size;
attr->exclude_callchain_user = 1;

View File

@ -66,7 +66,7 @@ __parse_regs(const struct option *opt, const char *str, int unset, bool intr)
if (*mode)
return -1;
mask = intr ? arch__intr_reg_mask() : arch__user_reg_mask();
mask = intr ? perf_intr_reg_mask(EM_HOST) : perf_user_reg_mask(EM_HOST);
/* str may be NULL in case no arg is passed to -I */
if (!str) {

View File

@ -1,7 +1,58 @@
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <regex.h>
#include <string.h>
#include <sys/auxv.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include "../debug.h"
#include "../event.h"
#include "../perf_regs.h"
#include "../../../arch/arm64/include/uapi/asm/perf_regs.h"
#include "../../perf-sys.h"
#include "../../arch/arm64/include/perf_regs.h"
#define SMPL_REG_MASK(b) (1ULL << (b))
#ifndef HWCAP_SVE
#define HWCAP_SVE (1 << 22)
#endif
/*
 * Return the arm64 sampling register mask.
 *
 * Interrupt sampling (intr == true) only offers the base PERF_REGS_MASK.
 * For user sampling, when the CPU advertises SVE via AT_HWCAP, the VG
 * register is additionally requested; the extended mask is only returned
 * if the running kernel accepts it, verified by opening a probe event.
 */
uint64_t __perf_reg_mask_arm64(bool intr)
{
struct perf_event_attr attr = {
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES,
.sample_type = PERF_SAMPLE_REGS_USER,
.disabled = 1,
.exclude_kernel = 1,
.sample_period = 1,
.sample_regs_user = PERF_REGS_MASK
};
int fd;
if (intr)
return PERF_REGS_MASK;
if (getauxval(AT_HWCAP) & HWCAP_SVE)
attr.sample_regs_user |= SMPL_REG_MASK(PERF_REG_ARM64_VG);
/*
* Check if the pmu supports perf extended regs, before
* returning the register mask to sample. Open the event
* on the perf process to check this.
*/
if (attr.sample_regs_user != PERF_REGS_MASK) {
event_attr_init(&attr);
fd = sys_perf_event_open(&attr, /*pid=*/0, /*cpu=*/-1,
/*group_fd=*/-1, /*flags=*/0);
if (fd != -1) {
close(fd);
return attr.sample_regs_user;
}
}
return PERF_REGS_MASK;
}
const char *__perf_reg_name_arm64(int id)
{

View File

@ -1,7 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
#include "../perf_regs.h"
#include "../../../arch/arm/include/uapi/asm/perf_regs.h"
#include "../../arch/arm/include/perf_regs.h"
/* arm: the base PERF_REGS_MASK for both interrupt and user sampling. */
uint64_t __perf_reg_mask_arm(bool intr __maybe_unused)
{
return PERF_REGS_MASK;
}
const char *__perf_reg_name_arm(int id)
{

View File

@ -9,7 +9,12 @@
#include "../perf_regs.h"
#undef __CSKYABIV2__
#define __CSKYABIV2__ 1 // Always want the V2 register definitions.
#include "../../arch/csky/include/uapi/asm/perf_regs.h"
#include "../../arch/csky/include/perf_regs.h"
/* csky: the base PERF_REGS_MASK for both interrupt and user sampling. */
uint64_t __perf_reg_mask_csky(bool intr __maybe_unused)
{
return PERF_REGS_MASK;
}
const char *__perf_reg_name_csky(int id, uint32_t e_flags)
{

View File

@ -1,7 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
#include "../perf_regs.h"
#include "../../../arch/loongarch/include/uapi/asm/perf_regs.h"
#include "../../arch/loongarch/include/perf_regs.h"
/* loongarch: the base PERF_REGS_MASK for both interrupt and user sampling. */
uint64_t __perf_reg_mask_loongarch(bool intr __maybe_unused)
{
return PERF_REGS_MASK;
}
const char *__perf_reg_name_loongarch(int id)
{

View File

@ -1,7 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
#include "../perf_regs.h"
#include "../../../arch/mips/include/uapi/asm/perf_regs.h"
#include "../../arch/mips/include/perf_regs.h"
/* mips: the base PERF_REGS_MASK for both interrupt and user sampling. */
uint64_t __perf_reg_mask_mips(bool intr __maybe_unused)
{
return PERF_REGS_MASK;
}
const char *__perf_reg_name_mips(int id)
{

View File

@ -1,7 +1,82 @@
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <string.h>
#include <regex.h>
#include <linux/zalloc.h>
#include "../debug.h"
#include "../event.h"
#include "../header.h"
#include "../perf_regs.h"
#include "../../../arch/powerpc/include/uapi/asm/perf_regs.h"
#include "../../perf-sys.h"
#include "../../arch/powerpc/util/utils_header.h"
#include "../../arch/powerpc/include/perf_regs.h"
#include <linux/kernel.h>
#define PVR_POWER9 0x004E
#define PVR_POWER10 0x0080
#define PVR_POWER11 0x0082
/*
 * mfspr is a POWERPC specific instruction, so only build and call the
 * PVR-probing implementation when compiling for POWERPC (32-bit
 * __powerpc__ or 64-bit __powerpc64__; ppc64 defines both macros).
 */
#if defined(__powerpc64__) || defined(__powerpc__)
/*
 * Return the powerpc sampling register mask.
 *
 * User sampling (intr == false) only offers the base PERF_REGS_MASK.
 * For interrupt sampling, read the PVR to select the platform-specific
 * extended mask (POWER9 -> PMU_MASK_300, POWER10/POWER11 -> PMU_MASK_31)
 * and only report it if the running kernel accepts it, verified by
 * opening a probe event on the perf process itself.
 */
uint64_t __perf_reg_mask_powerpc(bool intr)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
		.sample_type = PERF_SAMPLE_REGS_INTR,
		.precise_ip = 1,
		.disabled = 1,
		.exclude_kernel = 1,
	};
	int fd;
	u32 version;
	u64 extended_mask = 0, mask = PERF_REGS_MASK;

	if (!intr)
		return PERF_REGS_MASK;

	/*
	 * Get the PVR value to set the extended
	 * mask specific to platform.
	 */
	version = (((mfspr(SPRN_PVR)) >> 16) & 0xFFFF);
	if (version == PVR_POWER9)
		extended_mask = PERF_REG_PMU_MASK_300;
	else if ((version == PVR_POWER10) || (version == PVR_POWER11))
		extended_mask = PERF_REG_PMU_MASK_31;
	else
		return mask;

	attr.sample_regs_intr = extended_mask;
	attr.sample_period = 1;
	event_attr_init(&attr);

	/*
	 * Check if the pmu supports perf extended regs, before
	 * returning the register mask to sample. Open the event
	 * on the perf process to check this.
	 */
	fd = sys_perf_event_open(&attr, /*pid=*/0, /*cpu=*/-1,
				 /*group_fd=*/-1, /*flags=*/0);
	if (fd != -1) {
		close(fd);
		mask |= extended_mask;
	}
	return mask;
}
#else
/* Non-POWERPC build: cannot probe the PVR; report only the base mask. */
uint64_t __perf_reg_mask_powerpc(bool intr __maybe_unused)
{
	return PERF_REGS_MASK;
}
#endif
const char *__perf_reg_name_powerpc(int id)
{

View File

@ -1,7 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
#include "../perf_regs.h"
#include "../../../arch/riscv/include/uapi/asm/perf_regs.h"
#include "../../arch/riscv/include/perf_regs.h"
/* riscv: the base PERF_REGS_MASK for both interrupt and user sampling. */
uint64_t __perf_reg_mask_riscv(bool intr __maybe_unused)
{
return PERF_REGS_MASK;
}
const char *__perf_reg_name_riscv(int id)
{

View File

@ -1,7 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
#include "../perf_regs.h"
#include "../../../arch/s390/include/uapi/asm/perf_regs.h"
#include "../../arch/s390/include/perf_regs.h"
/* s390: the base PERF_REGS_MASK for both interrupt and user sampling. */
uint64_t __perf_reg_mask_s390(bool intr __maybe_unused)
{
return PERF_REGS_MASK;
}
const char *__perf_reg_name_s390(int id)
{

View File

@ -1,7 +1,65 @@
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <string.h>
#include <regex.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include "../debug.h"
#include "../event.h"
#include "../pmu.h"
#include "../pmus.h"
#include "../perf_regs.h"
#include "../../../arch/x86/include/uapi/asm/perf_regs.h"
#include "../../perf-sys.h"
#include "../../arch/x86/include/perf_regs.h"
/*
 * Return the x86 sampling register mask.
 *
 * User sampling (intr == false) only offers the base PERF_REGS_MASK.
 * For interrupt sampling, extended registers (PERF_REG_EXTENDED_MASK)
 * may additionally be available; this is verified by opening a probe
 * event requesting them on the perf process itself.
 */
uint64_t __perf_reg_mask_x86(bool intr)
{
struct perf_event_attr attr = {
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES,
.sample_type = PERF_SAMPLE_REGS_INTR,
.sample_regs_intr = PERF_REG_EXTENDED_MASK,
.precise_ip = 1,
.disabled = 1,
.exclude_kernel = 1,
};
int fd;
if (!intr)
return PERF_REGS_MASK;
/*
* In an unnamed union, init it here to build on older gcc versions
*/
attr.sample_period = 1;
/* Hybrid systems: encode the first core PMU's type into the config. */
if (perf_pmus__num_core_pmus() > 1) {
struct perf_pmu *pmu = NULL;
__u64 type = PERF_TYPE_RAW;
/*
* The same register set is supported among different hybrid PMUs.
* Only check the first available one.
*/
while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
type = pmu->type;
break;
}
attr.config |= type << PERF_PMU_TYPE_SHIFT;
}
event_attr_init(&attr);
fd = sys_perf_event_open(&attr, /*pid=*/0, /*cpu=*/-1,
/*group_fd=*/-1, /*flags=*/0);
if (fd != -1) {
close(fd);
return (PERF_REG_EXTENDED_MASK | PERF_REGS_MASK);
}
return PERF_REGS_MASK;
}
const char *__perf_reg_name_x86(int id)
{

View File

@ -13,14 +13,90 @@ int __weak arch_sdt_arg_parse_op(char *old_op __maybe_unused,
return SDT_ARG_SKIP;
}
uint64_t __weak arch__intr_reg_mask(void)
/*
 * Return the interrupt-sampling register mask for the given ELF machine
 * type, dispatching to the matching architecture-specific helper.
 * An unknown machine type yields an empty mask.
 */
uint64_t perf_intr_reg_mask(uint16_t e_machine)
{
	switch (e_machine) {
	case EM_ARM:
		return __perf_reg_mask_arm(/*intr=*/true);
	case EM_AARCH64:
		return __perf_reg_mask_arm64(/*intr=*/true);
	case EM_CSKY:
		return __perf_reg_mask_csky(/*intr=*/true);
	case EM_LOONGARCH:
		return __perf_reg_mask_loongarch(/*intr=*/true);
	case EM_MIPS:
		return __perf_reg_mask_mips(/*intr=*/true);
	case EM_PPC:
	case EM_PPC64:
		return __perf_reg_mask_powerpc(/*intr=*/true);
	case EM_RISCV:
		return __perf_reg_mask_riscv(/*intr=*/true);
	case EM_S390:
		return __perf_reg_mask_s390(/*intr=*/true);
	case EM_386:
	case EM_X86_64:
		return __perf_reg_mask_x86(/*intr=*/true);
	default:
		pr_debug("Unknown ELF machine %d, interrupt sampling register mask will be empty.\n",
			 e_machine);
		return 0;
	}
}
uint64_t __weak arch__user_reg_mask(void)
/*
 * Return the user-sampling register mask for the given ELF machine
 * type, dispatching to the matching architecture-specific helper.
 * An unknown machine type yields an empty mask.
 */
uint64_t perf_user_reg_mask(uint16_t e_machine)
{
	switch (e_machine) {
	case EM_ARM:
		return __perf_reg_mask_arm(/*intr=*/false);
	case EM_AARCH64:
		return __perf_reg_mask_arm64(/*intr=*/false);
	case EM_CSKY:
		return __perf_reg_mask_csky(/*intr=*/false);
	case EM_LOONGARCH:
		return __perf_reg_mask_loongarch(/*intr=*/false);
	case EM_MIPS:
		return __perf_reg_mask_mips(/*intr=*/false);
	case EM_PPC:
	case EM_PPC64:
		return __perf_reg_mask_powerpc(/*intr=*/false);
	case EM_RISCV:
		return __perf_reg_mask_riscv(/*intr=*/false);
	case EM_S390:
		return __perf_reg_mask_s390(/*intr=*/false);
	case EM_386:
	case EM_X86_64:
		return __perf_reg_mask_x86(/*intr=*/false);
	default:
		pr_debug("Unknown ELF machine %d, user sampling register mask will be empty.\n",
			 e_machine);
		return 0;
	}
}
const char *perf_reg_name(int id, uint16_t e_machine, uint32_t e_flags)

View File

@ -13,37 +13,55 @@ enum {
};
int arch_sdt_arg_parse_op(char *old_op, char **new_op);
uint64_t arch__intr_reg_mask(void);
uint64_t arch__user_reg_mask(void);
uint64_t perf_intr_reg_mask(uint16_t e_machine);
uint64_t perf_user_reg_mask(uint16_t e_machine);
const char *perf_reg_name(int id, uint16_t e_machine, uint32_t e_flags);
int perf_reg_value(u64 *valp, struct regs_dump *regs, int id);
uint64_t perf_arch_reg_ip(uint16_t e_machine);
uint64_t perf_arch_reg_sp(uint16_t e_machine);
uint64_t __perf_reg_mask_arm64(bool intr);
const char *__perf_reg_name_arm64(int id);
uint64_t __perf_reg_ip_arm64(void);
uint64_t __perf_reg_sp_arm64(void);
uint64_t __perf_reg_mask_arm(bool intr);
const char *__perf_reg_name_arm(int id);
uint64_t __perf_reg_ip_arm(void);
uint64_t __perf_reg_sp_arm(void);
uint64_t __perf_reg_mask_csky(bool intr);
const char *__perf_reg_name_csky(int id, uint32_t e_flags);
uint64_t __perf_reg_ip_csky(void);
uint64_t __perf_reg_sp_csky(void);
uint64_t __perf_reg_mask_loongarch(bool intr);
const char *__perf_reg_name_loongarch(int id);
uint64_t __perf_reg_ip_loongarch(void);
uint64_t __perf_reg_sp_loongarch(void);
uint64_t __perf_reg_mask_mips(bool intr);
const char *__perf_reg_name_mips(int id);
uint64_t __perf_reg_ip_mips(void);
uint64_t __perf_reg_sp_mips(void);
uint64_t __perf_reg_mask_powerpc(bool intr);
const char *__perf_reg_name_powerpc(int id);
uint64_t __perf_reg_ip_powerpc(void);
uint64_t __perf_reg_sp_powerpc(void);
uint64_t __perf_reg_mask_riscv(bool intr);
const char *__perf_reg_name_riscv(int id);
uint64_t __perf_reg_ip_riscv(void);
uint64_t __perf_reg_sp_riscv(void);
uint64_t __perf_reg_mask_s390(bool intr);
const char *__perf_reg_name_s390(int id);
uint64_t __perf_reg_ip_s390(void);
uint64_t __perf_reg_sp_s390(void);
uint64_t __perf_reg_mask_x86(bool intr);
const char *__perf_reg_name_x86(int id);
uint64_t __perf_reg_ip_x86(void);
uint64_t __perf_reg_sp_x86(void);