My Milk-V Duo board is running Linux, and I need to access the HPM (hardware performance monitor) registers to count the execution cycles of the instructions being executed. I have attached the platform.c file that I updated, cross-compiled, and booted on the Milk-V Duo, but I still cannot access the HPM registers.
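For reference, this is roughly how I try to read the counters from a user-space test program (a minimal sketch; my actual test code may differ). As far as I understand, rdcycle/rdinstret raise an illegal-instruction exception unless M-mode (and S-mode) have enabled counter access for the lower privilege levels:

#include <stdio.h>
#include <stdint.h>

/* Read the user-visible cycle counter (CSR cycle) via the rdcycle
 * pseudo-instruction. This traps if counter access is not enabled. */
static inline uint64_t read_cycle(void)
{
	uint64_t c;
	asm volatile ("rdcycle %0" : "=r" (c));
	return c;
}

/* Read the user-visible retired-instruction counter (CSR instret). */
static inline uint64_t read_instret(void)
{
	uint64_t i;
	asm volatile ("rdinstret %0" : "=r" (i));
	return i;
}

int main(void)
{
	uint64_t c0 = read_cycle(), i0 = read_instret();

	/* ... code under measurement ... */

	uint64_t c1 = read_cycle(), i1 = read_instret();
	printf("cycles: %llu, instructions: %llu\n",
	       (unsigned long long)(c1 - c0),
	       (unsigned long long)(i1 - i0));
	return 0;
}

The platform.c file I updated is below: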
/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *   Anup Patel <anup.patel@wdc.com>
 */
#include <libfdt.h>
#include <platform_override.h>
#include <sbi/riscv_asm.h>
#include <sbi/sbi_hartmask.h>
#include <sbi/sbi_platform.h>
#include <sbi/sbi_string.h>
#include <sbi_utils/fdt/fdt_domain.h>
#include <sbi_utils/fdt/fdt_fixup.h>
#include <sbi_utils/fdt/fdt_helper.h>
#include <sbi_utils/irqchip/fdt_irqchip.h>
#include <sbi_utils/serial/fdt_serial.h>
#include <sbi_utils/timer/fdt_timer.h>
#include <sbi_utils/ipi/fdt_ipi.h>
#include <sbi_utils/reset/fdt_reset.h>
#include <sbi/sbi_error.h>
#include <sbi/sbi_trap.h>

/* T-Head c9xx extended CSR: mcounterwen (defined before its first use below) */
#define CSR_MCOUNTERWEN			0x7c9

#define SBI_HART_HAS_MCOUNTEREN		(1UL << 0)
#define SBI_HART_HAS_MSTATUS_FS		(1UL << 1)
#define SBI_HART_HAS_MSTATUS_VS		(1UL << 2)
#define SBI_HART_HAS_MISA		(1UL << 3)
extern const struct platform_override sifive_fu540;
static const struct platform_override *special_platforms[] = {
	&sifive_fu540,
};
static const struct platform_override *generic_plat = NULL;
static const struct fdt_match *generic_plat_match = NULL;
static void fw_platform_lookup_special(void *fdt, int root_offset)
{
	int pos, noff;
	const struct platform_override *plat;
	const struct fdt_match *match;

	for (pos = 0; pos < array_size(special_platforms); pos++) {
		plat = special_platforms[pos];

		if (!plat->match_table)
			continue;

		noff = fdt_find_match(fdt, -1, plat->match_table, &match);
		if (noff < 0)
			continue;

		generic_plat = plat;
		generic_plat_match = match;
		break;
	}
}
extern struct sbi_platform platform;
static u32 generic_hart_index2id[SBI_HARTMASK_MAX_BITS] = { 0 };
/*
 * The fw_platform_init() function is called very early on the boot HART
 * of OpenSBI reference firmwares so that platform-specific code gets a
 * chance to update the "platform" instance before it is used.
 *
 * The arguments passed to fw_platform_init() are the boot-time state of
 * the A0 to A4 registers. "arg0" is the boot HART id and "arg1" is the
 * address of the FDT passed by the previous booting stage.
 *
 * The return value of fw_platform_init() is the FDT location. If the FDT
 * is unchanged (or modified in-place) then fw_platform_init() can simply
 * return the original FDT location (i.e. 'arg1') unmodified.
 */
unsigned long fw_platform_init(unsigned long arg0, unsigned long arg1,
			       unsigned long arg2, unsigned long arg3,
			       unsigned long arg4)
{
	const char *model;
	void *fdt = (void *)arg1;
	u32 hartid, hart_count = 0;
	int rc, root_offset, cpus_offset, cpu_offset, len;
	unsigned long mstatus;

	root_offset = fdt_path_offset(fdt, "/");

	mstatus = csr_read(CSR_MSTATUS);
	mstatus &= ~(3UL << 11);	/* Clear MPP field */
	mstatus |= (3UL << 11);		/* Set MPP to M-mode */
	csr_write(CSR_MSTATUS, mstatus);

	if (root_offset < 0)
		goto fail;

	fw_platform_lookup_special(fdt, root_offset);

	model = fdt_getprop(fdt, root_offset, "model", &len);
	if (model)
		sbi_strncpy(platform.name, model, sizeof(platform.name));

	if (generic_plat && generic_plat->features)
		platform.features = generic_plat->features(generic_plat_match);

	csr_write(CSR_MCOUNTEREN, 0xFFFFFFFF);	/* Allow access to all counters */
	csr_write(CSR_MCOUNTERWEN, 0xFFFFFFFF);	/* Enable writes to all counters */
	csr_write(CSR_MCOUNTINHIBIT, 0x0);	/* Enable cycle/instruction counting */

	platform.features |= SBI_HART_HAS_MCOUNTEREN |
			     SBI_HART_HAS_MSTATUS_FS |
			     SBI_HART_HAS_MSTATUS_VS |
			     SBI_HART_HAS_MISA;

	cpus_offset = fdt_path_offset(fdt, "/cpus");
	if (cpus_offset < 0)
		goto fail;

	fdt_for_each_subnode(cpu_offset, fdt, cpus_offset) {
		rc = fdt_parse_hart_id(fdt, cpu_offset, &hartid);
		if (rc)
			continue;

		if (SBI_HARTMASK_MAX_BITS <= hartid)
			continue;

		generic_hart_index2id[hart_count++] = hartid;
	}

	platform.hart_count = hart_count;

	/* Return original FDT pointer */
	return arg1;

fail:
	while (1)
		wfi();
}
static int generic_early_init(bool cold_boot)
{
	int rc;

	if (generic_plat && generic_plat->early_init) {
		rc = generic_plat->early_init(cold_boot, generic_plat_match);
		if (rc)
			return rc;
	}

	if (!cold_boot)
		return 0;

	return fdt_reset_init();
}
static int generic_final_init(bool cold_boot)
{
	void *fdt;
	int rc;

	if (generic_plat && generic_plat->final_init) {
		rc = generic_plat->final_init(cold_boot, generic_plat_match);
		if (rc)
			return rc;
	}

	if (!cold_boot)
		return 0;

	fdt = sbi_scratch_thishart_arg1_ptr();

	fdt_cpu_fixup(fdt);
	fdt_fixups(fdt);
	fdt_domain_fixup(fdt);

	if (generic_plat && generic_plat->fdt_fixup) {
		rc = generic_plat->fdt_fixup(fdt, generic_plat_match);
		if (rc)
			return rc;
	}

	return 0;
}
static void generic_early_exit(void)
{
	if (generic_plat && generic_plat->early_exit)
		generic_plat->early_exit(generic_plat_match);
}

static void generic_final_exit(void)
{
	if (generic_plat && generic_plat->final_exit)
		generic_plat->final_exit(generic_plat_match);
}

static int generic_domains_init(void)
{
	return fdt_domains_populate(sbi_scratch_thishart_arg1_ptr());
}

static u64 generic_tlbr_flush_limit(void)
{
	if (generic_plat && generic_plat->tlbr_flush_limit)
		return generic_plat->tlbr_flush_limit(generic_plat_match);

	return SBI_PLATFORM_TLB_RANGE_FLUSH_LIMIT_DEFAULT;
}
static void sbi_thead_pmu_init(void)
{
	csr_write(CSR_MIDELEG, 0x0);		/* Do NOT delegate any interrupts to S-mode */
	csr_write(CSR_MEDELEG, 0x0);		/* Do NOT delegate any exceptions to S-mode */
	csr_write(CSR_MCOUNTEREN, 0xFFFFFFFF);	/* Allow MCYCLE, MINSTRET, MHPMCOUNTER3-31 */
	csr_write(CSR_MCOUNTERWEN, 0xFFFFFFFF);	/* Enable M-mode writes to these CSRs */
	csr_write(CSR_MCOUNTINHIBIT, 0x0);	/* Allow performance counting */
	csr_write(CSR_SCOUNTEREN, 0x1);
	csr_write(CSR_MHPMCR, (0 << 13) | (0 << 11) | (0 << 10));	/* Allow U-mode access */

	/* CSR_MCOUNTEREN has already been set in mstatus_init() */
	csr_write(CSR_MHPMEVENT3, 1);
	csr_write(CSR_MHPMEVENT4, 2);
	csr_write(CSR_MHPMEVENT5, 3);
	csr_write(CSR_MHPMEVENT6, 4);
	csr_write(CSR_MHPMEVENT7, 5);
	csr_write(CSR_MHPMEVENT8, 6);
	csr_write(CSR_MHPMEVENT9, 7);
	csr_write(CSR_MHPMEVENT10, 8);
	csr_write(CSR_MHPMEVENT11, 9);
	csr_write(CSR_MHPMEVENT12, 10);
	csr_write(CSR_MHPMEVENT13, 11);
	csr_write(CSR_MHPMEVENT14, 12);
	csr_write(CSR_MHPMEVENT15, 13);
	csr_write(CSR_MHPMEVENT16, 14);
	csr_write(CSR_MHPMEVENT17, 15);
	csr_write(CSR_MHPMEVENT18, 16);
	csr_write(CSR_MHPMEVENT19, 17);
	csr_write(CSR_MHPMEVENT20, 18);
	csr_write(CSR_MHPMEVENT21, 19);
	csr_write(CSR_MHPMEVENT22, 20);
	csr_write(CSR_MHPMEVENT23, 21);
	csr_write(CSR_MHPMEVENT24, 22);
	csr_write(CSR_MHPMEVENT25, 23);
	csr_write(CSR_MHPMEVENT26, 24);
	csr_write(CSR_MHPMEVENT27, 25);
	csr_write(CSR_MHPMEVENT28, 26);
}
static void sbi_thead_pmu_map(unsigned long idx, unsigned long event_id)
{
	switch (idx) {
	case 3:
		csr_write(CSR_MHPMEVENT3, event_id);
		break;
	case 4:
		csr_write(CSR_MHPMEVENT4, event_id);
		break;
	case 5:
		csr_write(CSR_MHPMEVENT5, event_id);
		break;
	case 6:
		csr_write(CSR_MHPMEVENT6, event_id);
		break;
	case 7:
		csr_write(CSR_MHPMEVENT7, event_id);
		break;
	case 8:
		csr_write(CSR_MHPMEVENT8, event_id);
		break;
	case 9:
		csr_write(CSR_MHPMEVENT9, event_id);
		break;
	case 10:
		csr_write(CSR_MHPMEVENT10, event_id);
		break;
	case 11:
		csr_write(CSR_MHPMEVENT11, event_id);
		break;
	case 12:
		csr_write(CSR_MHPMEVENT12, event_id);
		break;
	case 13:
		csr_write(CSR_MHPMEVENT13, event_id);
		break;
	case 14:
		csr_write(CSR_MHPMEVENT14, event_id);
		break;
	case 15:
		csr_write(CSR_MHPMEVENT15, event_id);
		break;
	case 16:
		csr_write(CSR_MHPMEVENT16, event_id);
		break;
	case 17:
		csr_write(CSR_MHPMEVENT17, event_id);
		break;
	case 18:
		csr_write(CSR_MHPMEVENT18, event_id);
		break;
	case 19:
		csr_write(CSR_MHPMEVENT19, event_id);
		break;
	case 20:
		csr_write(CSR_MHPMEVENT20, event_id);
		break;
	case 21:
		csr_write(CSR_MHPMEVENT21, event_id);
		break;
	case 22:
		csr_write(CSR_MHPMEVENT22, event_id);
		break;
	case 23:
		csr_write(CSR_MHPMEVENT23, event_id);
		break;
	case 24:
		csr_write(CSR_MHPMEVENT24, event_id);
		break;
	case 25:
		csr_write(CSR_MHPMEVENT25, event_id);
		break;
	case 26:
		csr_write(CSR_MHPMEVENT26, event_id);
		break;
	case 27:
		csr_write(CSR_MHPMEVENT27, event_id);
		break;
	case 28:
		csr_write(CSR_MHPMEVENT28, event_id);
		break;
	case 29:
		csr_write(CSR_MHPMEVENT29, event_id);
		break;
	case 30:
		csr_write(CSR_MHPMEVENT30, event_id);
		break;
	case 31:
		csr_write(CSR_MHPMEVENT31, event_id);
		break;
	}
}
static void sbi_thead_pmu_set(unsigned long type, unsigned long idx,
			      unsigned long event_id)
{
	switch (type) {
	case 2:
		sbi_thead_pmu_map(idx, event_id);
		break;
	default:
		sbi_thead_pmu_init();
		break;
	}
}
static int thead_vendor_ext_provider(long extid, long funcid,
				     const struct sbi_trap_regs *regs,
				     unsigned long *out_value,
				     struct sbi_trap_info *out_trap)
{
	switch (extid) {
	case 0x09000001:	/* Custom SBI extension */
		switch (funcid) {
		case 0:		/* Read MCYCLE (execution cycles) */
			*out_value = csr_read(CSR_MCYCLE);
			return 0;
		case 1:		/* Read MINSTRET (retired instructions) */
			*out_value = csr_read(CSR_MINSTRET);
			return 0;
		default:
			return SBI_ERR_NOT_SUPPORTED;
		}
	default:
		return SBI_ERR_NOT_SUPPORTED;
	}
}
const struct sbi_platform_operations platform_ops = {
	.vendor_ext_provider	= thead_vendor_ext_provider,
	.early_init		= generic_early_init,
	.final_init		= generic_final_init,
	.early_exit		= generic_early_exit,
	.final_exit		= generic_final_exit,
	.domains_init		= generic_domains_init,
	.console_init		= fdt_serial_init,
	.irqchip_init		= fdt_irqchip_init,
	.irqchip_exit		= fdt_irqchip_exit,
	.ipi_init		= fdt_ipi_init,
	.ipi_exit		= fdt_ipi_exit,
	.get_tlbr_flush_limit	= generic_tlbr_flush_limit,
	.timer_init		= fdt_timer_init,
	.timer_exit		= fdt_timer_exit,
};
struct sbi_platform platform = {
	.opensbi_version	= OPENSBI_VERSION,
	.platform_version	= SBI_PLATFORM_VERSION(0x0, 0x01),
	.name			= "Generic",
	.features		= SBI_PLATFORM_DEFAULT_FEATURES,
	.hart_count		= SBI_HARTMASK_MAX_BITS,
	.hart_index2id		= generic_hart_index2id,
	.hart_stack_size	= SBI_PLATFORM_DEFAULT_HART_STACK_SIZE,
	.platform_ops_addr	= (unsigned long)&platform_ops
};
Can someone help me figure out what I am missing?
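For completeness, this is roughly how I expect to reach the custom vendor extension from the kernel side (a sketch only, following the standard SBI calling convention: a7 = extension ID, a6 = function ID, a0 = SBI error code on return, a1 = returned value). The extension ID 0x09000001 and the function IDs 0/1 are the ones used in thead_vendor_ext_provider() above; this has to run in S-mode, e.g. from a small kernel module, not from a user-space program:

#define THEAD_CYCLE_EXT_ID	0x09000001UL	/* matches the case in thead_vendor_ext_provider() */
#define THEAD_FID_MCYCLE	0UL		/* funcid 0: read MCYCLE */
#define THEAD_FID_MINSTRET	1UL		/* funcid 1: read MINSTRET */

/* S-mode helper: issue the SBI ecall and return the SBI error code,
 * storing the counter value through 'value' on success. */
static inline long thead_ext_read(unsigned long fid, unsigned long *value)
{
	register unsigned long a0 asm("a0");
	register unsigned long a1 asm("a1");
	register unsigned long a6 asm("a6") = fid;
	register unsigned long a7 asm("a7") = THEAD_CYCLE_EXT_ID;

	asm volatile ("ecall"
		      : "=r" (a0), "=r" (a1)
		      : "r" (a6), "r" (a7)
		      : "memory");

	if (!a0 && value)
		*value = a1;
	return (long)a0;	/* 0 == SBI_SUCCESS */
}

Usage from kernel code would then be something like thead_ext_read(THEAD_FID_MCYCLE, &cycles).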