path: root/jslinux-2019-12-21/tinyemu-2019-12-21/riscv_cpu.c
Diffstat (limited to 'jslinux-2019-12-21/tinyemu-2019-12-21/riscv_cpu.c')
-rw-r--r--  jslinux-2019-12-21/tinyemu-2019-12-21/riscv_cpu.c  1377
 1 file changed, 1377 insertions(+), 0 deletions(-)
diff --git a/jslinux-2019-12-21/tinyemu-2019-12-21/riscv_cpu.c b/jslinux-2019-12-21/tinyemu-2019-12-21/riscv_cpu.c
new file mode 100644
index 0000000..ba4fa4c
--- /dev/null
+++ b/jslinux-2019-12-21/tinyemu-2019-12-21/riscv_cpu.c
@@ -0,0 +1,1377 @@
+/*
+ * RISC-V CPU emulator
+ *
+ * Copyright (c) 2016-2017 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <inttypes.h>
+#include <assert.h>
+#include <fcntl.h>
+
+#include "cutils.h"
+#include "iomem.h"
+#include "riscv_cpu.h"
+
+#ifndef MAX_XLEN
+#error MAX_XLEN must be defined
+#endif
+#ifndef CONFIG_RISCV_MAX_XLEN
+#error CONFIG_RISCV_MAX_XLEN must be defined
+#endif
+
+//#define DUMP_INVALID_MEM_ACCESS
+//#define DUMP_MMU_EXCEPTIONS
+//#define DUMP_INTERRUPTS
+//#define DUMP_INVALID_CSR
+//#define DUMP_EXCEPTIONS
+//#define DUMP_CSR
+//#define CONFIG_LOGFILE
+
+#include "riscv_cpu_priv.h"
+
+#if FLEN > 0
+#include "softfp.h"
+#endif
+
+#ifdef USE_GLOBAL_STATE
+static RISCVCPUState riscv_cpu_global_state;
+#endif
+#ifdef USE_GLOBAL_VARIABLES
+#define code_ptr s->__code_ptr
+#define code_end s->__code_end
+#define code_to_pc_addend s->__code_to_pc_addend
+#endif
+
+#ifdef CONFIG_LOGFILE
+static FILE *log_file;
+
+static void log_vprintf(const char *fmt, va_list ap)
+{
+    if (!log_file)
+        log_file = fopen("/tmp/riscemu.log", "wb");
+    if (log_file)
+        vfprintf(log_file, fmt, ap);
+}
+#else
+static void log_vprintf(const char *fmt, va_list ap)
+{
+ vprintf(fmt, ap);
+}
+#endif
+
+static void __attribute__((format(printf, 1, 2), unused)) log_printf(const char *fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+ log_vprintf(fmt, ap);
+ va_end(ap);
+}
+
+#if MAX_XLEN == 128
+static void fprint_target_ulong(FILE *f, target_ulong a)
+{
+ fprintf(f, "%016" PRIx64 "%016" PRIx64, (uint64_t)(a >> 64), (uint64_t)a);
+}
+#else
+static void fprint_target_ulong(FILE *f, target_ulong a)
+{
+ fprintf(f, "%" PR_target_ulong, a);
+}
+#endif
+
+static void print_target_ulong(target_ulong a)
+{
+ fprint_target_ulong(stdout, a);
+}
+
+static const char *reg_name[32] = {
+"zero", "ra", "sp", "gp", "tp", "t0", "t1", "t2",
+"s0", "s1", "a0", "a1", "a2", "a3", "a4", "a5",
+"a6", "a7", "s2", "s3", "s4", "s5", "s6", "s7",
+"s8", "s9", "s10", "s11", "t3", "t4", "t5", "t6"
+};
+
+static void dump_regs(RISCVCPUState *s)
+{
+ int i, cols;
+    const char priv_str[] = "USHM";
+ cols = 256 / MAX_XLEN;
+ printf("pc =");
+ print_target_ulong(s->pc);
+ printf(" ");
+ for(i = 1; i < 32; i++) {
+ printf("%-3s=", reg_name[i]);
+ print_target_ulong(s->reg[i]);
+ if ((i & (cols - 1)) == (cols - 1))
+ printf("\n");
+ else
+ printf(" ");
+ }
+ printf("priv=%c", priv_str[s->priv]);
+ printf(" mstatus=");
+ print_target_ulong(s->mstatus);
+ printf(" cycles=%" PRId64, s->insn_counter);
+ printf("\n");
+#if 1
+ printf(" mideleg=");
+ print_target_ulong(s->mideleg);
+ printf(" mie=");
+ print_target_ulong(s->mie);
+ printf(" mip=");
+ print_target_ulong(s->mip);
+ printf("\n");
+#endif
+}
+
+static __attribute__((unused)) void cpu_abort(RISCVCPUState *s)
+{
+ dump_regs(s);
+ abort();
+}
+
+/* addr must be aligned. Only RAM accesses are supported */
+#define PHYS_MEM_READ_WRITE(size, uint_type) \
+static __maybe_unused inline void phys_write_u ## size(RISCVCPUState *s, target_ulong addr,\
+ uint_type val) \
+{\
+ PhysMemoryRange *pr = get_phys_mem_range(s->mem_map, addr);\
+ if (!pr || !pr->is_ram)\
+ return;\
+ *(uint_type *)(pr->phys_mem + \
+ (uintptr_t)(addr - pr->addr)) = val;\
+}\
+\
+static __maybe_unused inline uint_type phys_read_u ## size(RISCVCPUState *s, target_ulong addr) \
+{\
+ PhysMemoryRange *pr = get_phys_mem_range(s->mem_map, addr);\
+ if (!pr || !pr->is_ram)\
+ return 0;\
+ return *(uint_type *)(pr->phys_mem + \
+ (uintptr_t)(addr - pr->addr)); \
+}
+
+PHYS_MEM_READ_WRITE(8, uint8_t)
+PHYS_MEM_READ_WRITE(32, uint32_t)
+PHYS_MEM_READ_WRITE(64, uint64_t)
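+
+/* Example (informative sketch): the macro above expands to accessor pairs
+   such as phys_read_u32()/phys_write_u32(). A hypothetical helper copying
+   one aligned RAM word could look like this (non-RAM reads yield 0 and
+   non-RAM writes are silently dropped): */
+static __maybe_unused void phys_copy_u32_sketch(RISCVCPUState *s,
+                                                target_ulong dst,
+                                                target_ulong src)
+{
+    /* both addresses must be 32-bit aligned and map to RAM */
+    phys_write_u32(s, dst, phys_read_u32(s, src));
+}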
+
+#define PTE_V_MASK (1 << 0)
+#define PTE_U_MASK (1 << 4)
+#define PTE_A_MASK (1 << 6)
+#define PTE_D_MASK (1 << 7)
+
+#define ACCESS_READ 0
+#define ACCESS_WRITE 1
+#define ACCESS_CODE 2
+
+/* access = ACCESS_READ, ACCESS_WRITE or ACCESS_CODE. Return 0 if OK, -1 on
+   translation error (the caller sets the pending exception fields). */
+static int get_phys_addr(RISCVCPUState *s,
+ target_ulong *ppaddr, target_ulong vaddr,
+ int access)
+{
+ int mode, levels, pte_bits, pte_idx, pte_mask, pte_size_log2, xwr, priv;
+ int need_write, vaddr_shift, i, pte_addr_bits;
+ target_ulong pte_addr, pte, vaddr_mask, paddr;
+
+ if ((s->mstatus & MSTATUS_MPRV) && access != ACCESS_CODE) {
+        /* use previous privilege */
+ priv = (s->mstatus >> MSTATUS_MPP_SHIFT) & 3;
+ } else {
+ priv = s->priv;
+ }
+
+ if (priv == PRV_M) {
+ if (s->cur_xlen < MAX_XLEN) {
+ /* truncate virtual address */
+ *ppaddr = vaddr & (((target_ulong)1 << s->cur_xlen) - 1);
+ } else {
+ *ppaddr = vaddr;
+ }
+ return 0;
+ }
+#if MAX_XLEN == 32
+ /* 32 bits */
+ mode = s->satp >> 31;
+ if (mode == 0) {
+ /* bare: no translation */
+ *ppaddr = vaddr;
+ return 0;
+ } else {
+ /* sv32 */
+ levels = 2;
+ pte_size_log2 = 2;
+ pte_addr_bits = 22;
+ }
+#else
+ mode = (s->satp >> 60) & 0xf;
+ if (mode == 0) {
+ /* bare: no translation */
+ *ppaddr = vaddr;
+ return 0;
+ } else {
+ /* sv39/sv48 */
+ levels = mode - 8 + 3;
+ pte_size_log2 = 3;
+ vaddr_shift = MAX_XLEN - (PG_SHIFT + levels * 9);
+ if ((((target_long)vaddr << vaddr_shift) >> vaddr_shift) != vaddr)
+ return -1;
+ pte_addr_bits = 44;
+ }
+#endif
+ pte_addr = (s->satp & (((target_ulong)1 << pte_addr_bits) - 1)) << PG_SHIFT;
+ pte_bits = 12 - pte_size_log2;
+ pte_mask = (1 << pte_bits) - 1;
+ for(i = 0; i < levels; i++) {
+ vaddr_shift = PG_SHIFT + pte_bits * (levels - 1 - i);
+ pte_idx = (vaddr >> vaddr_shift) & pte_mask;
+ pte_addr += pte_idx << pte_size_log2;
+ if (pte_size_log2 == 2)
+ pte = phys_read_u32(s, pte_addr);
+ else
+ pte = phys_read_u64(s, pte_addr);
+ //printf("pte=0x%08" PRIx64 "\n", pte);
+ if (!(pte & PTE_V_MASK))
+ return -1; /* invalid PTE */
+ paddr = (pte >> 10) << PG_SHIFT;
+ xwr = (pte >> 1) & 7;
+ if (xwr != 0) {
+ if (xwr == 2 || xwr == 6)
+ return -1;
+            /* privilege check */
+ if (priv == PRV_S) {
+ if ((pte & PTE_U_MASK) && !(s->mstatus & MSTATUS_SUM))
+ return -1;
+ } else {
+ if (!(pte & PTE_U_MASK))
+ return -1;
+ }
+ /* protection check */
+ /* MXR allows read access to execute-only pages */
+ if (s->mstatus & MSTATUS_MXR)
+ xwr |= (xwr >> 2);
+
+ if (((xwr >> access) & 1) == 0)
+ return -1;
+ need_write = !(pte & PTE_A_MASK) ||
+ (!(pte & PTE_D_MASK) && access == ACCESS_WRITE);
+ pte |= PTE_A_MASK;
+ if (access == ACCESS_WRITE)
+ pte |= PTE_D_MASK;
+ if (need_write) {
+ if (pte_size_log2 == 2)
+ phys_write_u32(s, pte_addr, pte);
+ else
+ phys_write_u64(s, pte_addr, pte);
+ }
+ vaddr_mask = ((target_ulong)1 << vaddr_shift) - 1;
+ *ppaddr = (vaddr & vaddr_mask) | (paddr & ~vaddr_mask);
+ return 0;
+ } else {
+ pte_addr = paddr;
+ }
+ }
+ return -1;
+}
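+
+/* Informative Sv39 example (not used by the code): with levels = 3 and
+   PG_SHIFT = 12, the walk above consumes the virtual address as
+     vaddr[38:30] = VPN[2], vaddr[29:21] = VPN[1], vaddr[20:12] = VPN[0],
+     vaddr[11:0]  = page offset,
+   and the ((target_long)vaddr << vaddr_shift) >> vaddr_shift test rejects
+   addresses whose bits [63:39] are not a sign extension of bit 38. A leaf
+   PTE found before the last level maps a superpage, hence the vaddr_mask
+   mixing of vaddr and paddr bits at the end. */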
+
+/* return 0 if OK, != 0 if exception */
+int target_read_slow(RISCVCPUState *s, mem_uint_t *pval,
+ target_ulong addr, int size_log2)
+{
+ int size, tlb_idx, err, al;
+ target_ulong paddr, offset;
+ uint8_t *ptr;
+ PhysMemoryRange *pr;
+ mem_uint_t ret;
+
+ /* first handle unaligned accesses */
+ size = 1 << size_log2;
+ al = addr & (size - 1);
+ if (al != 0) {
+ switch(size_log2) {
+ case 1:
+ {
+ uint8_t v0, v1;
+ err = target_read_u8(s, &v0, addr);
+ if (err)
+ return err;
+ err = target_read_u8(s, &v1, addr + 1);
+ if (err)
+ return err;
+ ret = v0 | (v1 << 8);
+ }
+ break;
+ case 2:
+ {
+ uint32_t v0, v1;
+ addr -= al;
+ err = target_read_u32(s, &v0, addr);
+ if (err)
+ return err;
+ err = target_read_u32(s, &v1, addr + 4);
+ if (err)
+ return err;
+ ret = (v0 >> (al * 8)) | (v1 << (32 - al * 8));
+ }
+ break;
+#if MLEN >= 64
+ case 3:
+ {
+ uint64_t v0, v1;
+ addr -= al;
+ err = target_read_u64(s, &v0, addr);
+ if (err)
+ return err;
+ err = target_read_u64(s, &v1, addr + 8);
+ if (err)
+ return err;
+ ret = (v0 >> (al * 8)) | (v1 << (64 - al * 8));
+ }
+ break;
+#endif
+#if MLEN >= 128
+ case 4:
+ {
+ uint128_t v0, v1;
+ addr -= al;
+ err = target_read_u128(s, &v0, addr);
+ if (err)
+ return err;
+ err = target_read_u128(s, &v1, addr + 16);
+ if (err)
+ return err;
+ ret = (v0 >> (al * 8)) | (v1 << (128 - al * 8));
+ }
+ break;
+#endif
+ default:
+ abort();
+ }
+ } else {
+ if (get_phys_addr(s, &paddr, addr, ACCESS_READ)) {
+ s->pending_tval = addr;
+ s->pending_exception = CAUSE_LOAD_PAGE_FAULT;
+ return -1;
+ }
+ pr = get_phys_mem_range(s->mem_map, paddr);
+ if (!pr) {
+#ifdef DUMP_INVALID_MEM_ACCESS
+ printf("target_read_slow: invalid physical address 0x");
+ print_target_ulong(paddr);
+ printf("\n");
+#endif
+ return 0;
+ } else if (pr->is_ram) {
+ tlb_idx = (addr >> PG_SHIFT) & (TLB_SIZE - 1);
+ ptr = pr->phys_mem + (uintptr_t)(paddr - pr->addr);
+ s->tlb_read[tlb_idx].vaddr = addr & ~PG_MASK;
+ s->tlb_read[tlb_idx].mem_addend = (uintptr_t)ptr - addr;
+ switch(size_log2) {
+ case 0:
+ ret = *(uint8_t *)ptr;
+ break;
+ case 1:
+ ret = *(uint16_t *)ptr;
+ break;
+ case 2:
+ ret = *(uint32_t *)ptr;
+ break;
+#if MLEN >= 64
+ case 3:
+ ret = *(uint64_t *)ptr;
+ break;
+#endif
+#if MLEN >= 128
+ case 4:
+ ret = *(uint128_t *)ptr;
+ break;
+#endif
+ default:
+ abort();
+ }
+ } else {
+ offset = paddr - pr->addr;
+ if (((pr->devio_flags >> size_log2) & 1) != 0) {
+ ret = pr->read_func(pr->opaque, offset, size_log2);
+ }
+#if MLEN >= 64
+ else if ((pr->devio_flags & DEVIO_SIZE32) && size_log2 == 3) {
+ /* emulate 64 bit access */
+ ret = pr->read_func(pr->opaque, offset, 2);
+ ret |= (uint64_t)pr->read_func(pr->opaque, offset + 4, 2) << 32;
+
+ }
+#endif
+ else {
+#ifdef DUMP_INVALID_MEM_ACCESS
+ printf("unsupported device read access: addr=0x");
+ print_target_ulong(paddr);
+ printf(" width=%d bits\n", 1 << (3 + size_log2));
+#endif
+ ret = 0;
+ }
+ }
+ }
+ *pval = ret;
+ return 0;
+}
+
+/* return 0 if OK, != 0 if exception */
+int target_write_slow(RISCVCPUState *s, target_ulong addr,
+ mem_uint_t val, int size_log2)
+{
+ int size, i, tlb_idx, err;
+ target_ulong paddr, offset;
+ uint8_t *ptr;
+ PhysMemoryRange *pr;
+
+ /* first handle unaligned accesses */
+ size = 1 << size_log2;
+ if ((addr & (size - 1)) != 0) {
+ /* XXX: should avoid modifying the memory in case of exception */
+ for(i = 0; i < size; i++) {
+ err = target_write_u8(s, addr + i, (val >> (8 * i)) & 0xff);
+ if (err)
+ return err;
+ }
+ } else {
+ if (get_phys_addr(s, &paddr, addr, ACCESS_WRITE)) {
+ s->pending_tval = addr;
+ s->pending_exception = CAUSE_STORE_PAGE_FAULT;
+ return -1;
+ }
+ pr = get_phys_mem_range(s->mem_map, paddr);
+ if (!pr) {
+#ifdef DUMP_INVALID_MEM_ACCESS
+ printf("target_write_slow: invalid physical address 0x");
+ print_target_ulong(paddr);
+ printf("\n");
+#endif
+ } else if (pr->is_ram) {
+ phys_mem_set_dirty_bit(pr, paddr - pr->addr);
+ tlb_idx = (addr >> PG_SHIFT) & (TLB_SIZE - 1);
+ ptr = pr->phys_mem + (uintptr_t)(paddr - pr->addr);
+ s->tlb_write[tlb_idx].vaddr = addr & ~PG_MASK;
+ s->tlb_write[tlb_idx].mem_addend = (uintptr_t)ptr - addr;
+ switch(size_log2) {
+ case 0:
+ *(uint8_t *)ptr = val;
+ break;
+ case 1:
+ *(uint16_t *)ptr = val;
+ break;
+ case 2:
+ *(uint32_t *)ptr = val;
+ break;
+#if MLEN >= 64
+ case 3:
+ *(uint64_t *)ptr = val;
+ break;
+#endif
+#if MLEN >= 128
+ case 4:
+ *(uint128_t *)ptr = val;
+ break;
+#endif
+ default:
+ abort();
+ }
+ } else {
+ offset = paddr - pr->addr;
+ if (((pr->devio_flags >> size_log2) & 1) != 0) {
+ pr->write_func(pr->opaque, offset, val, size_log2);
+ }
+#if MLEN >= 64
+ else if ((pr->devio_flags & DEVIO_SIZE32) && size_log2 == 3) {
+ /* emulate 64 bit access */
+ pr->write_func(pr->opaque, offset,
+ val & 0xffffffff, 2);
+ pr->write_func(pr->opaque, offset + 4,
+ (val >> 32) & 0xffffffff, 2);
+ }
+#endif
+ else {
+#ifdef DUMP_INVALID_MEM_ACCESS
+ printf("unsupported device write access: addr=0x");
+ print_target_ulong(paddr);
+ printf(" width=%d bits\n", 1 << (3 + size_log2));
+#endif
+ }
+ }
+ }
+ return 0;
+}
+
+struct __attribute__((packed)) unaligned_u32 {
+ uint32_t u32;
+};
+
+/* unaligned access at an address known to be a multiple of 2 */
+static uint32_t get_insn32(uint8_t *ptr)
+{
+#if defined(EMSCRIPTEN)
+ return ((uint16_t *)ptr)[0] | (((uint16_t *)ptr)[1] << 16);
+#else
+ return ((struct unaligned_u32 *)ptr)->u32;
+#endif
+}
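+
+/* The packed struct lets the compiler emit an unaligned-capable 32-bit load
+   on native targets; under Emscripten the two naturally aligned 16-bit
+   halves are read and combined instead, since misaligned accesses are
+   unreliable there. */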
+
+/* return 0 if OK, != 0 if exception */
+static no_inline __exception int target_read_insn_slow(RISCVCPUState *s,
+ uint8_t **pptr,
+ target_ulong addr)
+{
+ int tlb_idx;
+ target_ulong paddr;
+ uint8_t *ptr;
+ PhysMemoryRange *pr;
+
+ if (get_phys_addr(s, &paddr, addr, ACCESS_CODE)) {
+ s->pending_tval = addr;
+ s->pending_exception = CAUSE_FETCH_PAGE_FAULT;
+ return -1;
+ }
+ pr = get_phys_mem_range(s->mem_map, paddr);
+ if (!pr || !pr->is_ram) {
+        /* XXX: we can only execute code from RAM */
+ s->pending_tval = addr;
+ s->pending_exception = CAUSE_FAULT_FETCH;
+ return -1;
+ }
+ tlb_idx = (addr >> PG_SHIFT) & (TLB_SIZE - 1);
+ ptr = pr->phys_mem + (uintptr_t)(paddr - pr->addr);
+ s->tlb_code[tlb_idx].vaddr = addr & ~PG_MASK;
+ s->tlb_code[tlb_idx].mem_addend = (uintptr_t)ptr - addr;
+ *pptr = ptr;
+ return 0;
+}
+
+/* addr must be aligned */
+static inline __exception int target_read_insn_u16(RISCVCPUState *s, uint16_t *pinsn,
+ target_ulong addr)
+{
+ uint32_t tlb_idx;
+ uint8_t *ptr;
+
+ tlb_idx = (addr >> PG_SHIFT) & (TLB_SIZE - 1);
+ if (likely(s->tlb_code[tlb_idx].vaddr == (addr & ~PG_MASK))) {
+ ptr = (uint8_t *)(s->tlb_code[tlb_idx].mem_addend +
+ (uintptr_t)addr);
+ } else {
+ if (target_read_insn_slow(s, &ptr, addr))
+ return -1;
+ }
+ *pinsn = *(uint16_t *)ptr;
+ return 0;
+}
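+
+/* Note on the TLB scheme used above and in target_read_slow()/
+   target_write_slow(): each entry caches
+     mem_addend = (uintptr_t)host_ptr - vaddr
+   for one guest page, so on a hit the host pointer is recovered with a
+   single addition, mem_addend + vaddr, valid for any offset in the page. */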
+
+static void tlb_init(RISCVCPUState *s)
+{
+ int i;
+
+ for(i = 0; i < TLB_SIZE; i++) {
+ s->tlb_read[i].vaddr = -1;
+ s->tlb_write[i].vaddr = -1;
+ s->tlb_code[i].vaddr = -1;
+ }
+}
+
+static void tlb_flush_all(RISCVCPUState *s)
+{
+ tlb_init(s);
+}
+
+static void tlb_flush_vaddr(RISCVCPUState *s, target_ulong vaddr)
+{
+    /* XXX: no per-page invalidation; flush the whole TLB */
+    tlb_flush_all(s);
+}
+
+/* XXX: inefficient but not critical as long as it is seldom used */
+static void glue(riscv_cpu_flush_tlb_write_range_ram,
+ MAX_XLEN)(RISCVCPUState *s,
+ uint8_t *ram_ptr, size_t ram_size)
+{
+ uint8_t *ptr, *ram_end;
+ int i;
+
+ ram_end = ram_ptr + ram_size;
+ for(i = 0; i < TLB_SIZE; i++) {
+ if (s->tlb_write[i].vaddr != -1) {
+ ptr = (uint8_t *)(s->tlb_write[i].mem_addend +
+ (uintptr_t)s->tlb_write[i].vaddr);
+ if (ptr >= ram_ptr && ptr < ram_end) {
+ s->tlb_write[i].vaddr = -1;
+ }
+ }
+ }
+}
+
+
+#define SSTATUS_MASK0 (MSTATUS_UIE | MSTATUS_SIE | \
+ MSTATUS_UPIE | MSTATUS_SPIE | \
+ MSTATUS_SPP | \
+ MSTATUS_FS | MSTATUS_XS | \
+ MSTATUS_SUM | MSTATUS_MXR)
+#if MAX_XLEN >= 64
+#define SSTATUS_MASK (SSTATUS_MASK0 | MSTATUS_UXL_MASK)
+#else
+#define SSTATUS_MASK SSTATUS_MASK0
+#endif
+
+
+#define MSTATUS_MASK (MSTATUS_UIE | MSTATUS_SIE | MSTATUS_MIE | \
+ MSTATUS_UPIE | MSTATUS_SPIE | MSTATUS_MPIE | \
+ MSTATUS_SPP | MSTATUS_MPP | \
+ MSTATUS_FS | \
+ MSTATUS_MPRV | MSTATUS_SUM | MSTATUS_MXR)
+
+/* cycle and insn counters */
+#define COUNTEREN_MASK ((1 << 0) | (1 << 2))
+
+/* return the complete mstatus with the SD bit */
+static target_ulong get_mstatus(RISCVCPUState *s, target_ulong mask)
+{
+ target_ulong val;
+ BOOL sd;
+ val = s->mstatus | (s->fs << MSTATUS_FS_SHIFT);
+ val &= mask;
+ sd = ((val & MSTATUS_FS) == MSTATUS_FS) |
+ ((val & MSTATUS_XS) == MSTATUS_XS);
+ if (sd)
+ val |= (target_ulong)1 << (s->cur_xlen - 1);
+ return val;
+}
+
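+/* map an XLEN to the MXL/SXL/UXL field encoding: 1 = 32-bit, 2 = 64-bit,
+   3 = 128-bit */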
+static int get_base_from_xlen(int xlen)
+{
+ if (xlen == 32)
+ return 1;
+ else if (xlen == 64)
+ return 2;
+ else
+ return 3;
+}
+
+static void set_mstatus(RISCVCPUState *s, target_ulong val)
+{
+ target_ulong mod, mask;
+
+ /* flush the TLBs if change of MMU config */
+ mod = s->mstatus ^ val;
+ if ((mod & (MSTATUS_MPRV | MSTATUS_SUM | MSTATUS_MXR)) != 0 ||
+ ((s->mstatus & MSTATUS_MPRV) && (mod & MSTATUS_MPP) != 0)) {
+ tlb_flush_all(s);
+ }
+ s->fs = (val >> MSTATUS_FS_SHIFT) & 3;
+
+ mask = MSTATUS_MASK & ~MSTATUS_FS;
+#if MAX_XLEN >= 64
+ {
+ int uxl, sxl;
+ uxl = (val >> MSTATUS_UXL_SHIFT) & 3;
+ if (uxl >= 1 && uxl <= get_base_from_xlen(MAX_XLEN))
+ mask |= MSTATUS_UXL_MASK;
+        sxl = (val >> MSTATUS_SXL_SHIFT) & 3;
+ if (sxl >= 1 && sxl <= get_base_from_xlen(MAX_XLEN))
+ mask |= MSTATUS_SXL_MASK;
+ }
+#endif
+ s->mstatus = (s->mstatus & ~mask) | (val & mask);
+}
+
+/* return -1 if invalid CSR, 0 if OK. 'will_write' indicates that the
+   CSR will be written afterwards (used for the CSR access check) */
+static int csr_read(RISCVCPUState *s, target_ulong *pval, uint32_t csr,
+ BOOL will_write)
+{
+ target_ulong val;
+
+ if (((csr & 0xc00) == 0xc00) && will_write)
+ return -1; /* read-only CSR */
+ if (s->priv < ((csr >> 8) & 3))
+        return -1; /* not enough privilege */
+
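+    /* standard CSR address convention: csr[11:10] == 3 marks a read-only
+       CSR and csr[9:8] is the lowest privilege level allowed to access it,
+       which is what the two checks above implement */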
+ switch(csr) {
+#if FLEN > 0
+ case 0x001: /* fflags */
+ if (s->fs == 0)
+ return -1;
+ val = s->fflags;
+ break;
+ case 0x002: /* frm */
+ if (s->fs == 0)
+ return -1;
+ val = s->frm;
+ break;
+ case 0x003:
+ if (s->fs == 0)
+ return -1;
+ val = s->fflags | (s->frm << 5);
+ break;
+#endif
+    case 0xc00: /* cycle */
+    case 0xc02: /* instret */
+ {
+ uint32_t counteren;
+ if (s->priv < PRV_M) {
+ if (s->priv < PRV_S)
+ counteren = s->scounteren;
+ else
+ counteren = s->mcounteren;
+ if (((counteren >> (csr & 0x1f)) & 1) == 0)
+ goto invalid_csr;
+ }
+ }
+ val = (int64_t)s->insn_counter;
+ break;
+    case 0xc80: /* cycleh */
+    case 0xc82: /* instreth */
+ if (s->cur_xlen != 32)
+ goto invalid_csr;
+ {
+ uint32_t counteren;
+ if (s->priv < PRV_M) {
+ if (s->priv < PRV_S)
+ counteren = s->scounteren;
+ else
+ counteren = s->mcounteren;
+ if (((counteren >> (csr & 0x1f)) & 1) == 0)
+ goto invalid_csr;
+ }
+ }
+ val = s->insn_counter >> 32;
+ break;
+
+    case 0x100: /* sstatus */
+        val = get_mstatus(s, SSTATUS_MASK);
+        break;
+    case 0x104: /* sie */
+        val = s->mie & s->mideleg;
+        break;
+    case 0x105: /* stvec */
+        val = s->stvec;
+        break;
+    case 0x106: /* scounteren */
+        val = s->scounteren;
+        break;
+    case 0x140: /* sscratch */
+        val = s->sscratch;
+        break;
+    case 0x141: /* sepc */
+        val = s->sepc;
+        break;
+    case 0x142: /* scause */
+        val = s->scause;
+        break;
+    case 0x143: /* stval */
+        val = s->stval;
+        break;
+    case 0x144: /* sip */
+        val = s->mip & s->mideleg;
+        break;
+    case 0x180: /* satp */
+        val = s->satp;
+        break;
+    case 0x300: /* mstatus */
+        val = get_mstatus(s, (target_ulong)-1);
+        break;
+    case 0x301: /* misa */
+        val = s->misa;
+        val |= (target_ulong)s->mxl << (s->cur_xlen - 2);
+        break;
+    case 0x302: /* medeleg */
+        val = s->medeleg;
+        break;
+    case 0x303: /* mideleg */
+        val = s->mideleg;
+        break;
+    case 0x304: /* mie */
+        val = s->mie;
+        break;
+    case 0x305: /* mtvec */
+        val = s->mtvec;
+        break;
+    case 0x306: /* mcounteren */
+        val = s->mcounteren;
+        break;
+    case 0x340: /* mscratch */
+        val = s->mscratch;
+        break;
+    case 0x341: /* mepc */
+        val = s->mepc;
+        break;
+    case 0x342: /* mcause */
+        val = s->mcause;
+        break;
+    case 0x343: /* mtval */
+        val = s->mtval;
+        break;
+    case 0x344: /* mip */
+        val = s->mip;
+        break;
+ case 0xb00: /* mcycle */
+ case 0xb02: /* minstret */
+ val = (int64_t)s->insn_counter;
+ break;
+ case 0xb80: /* mcycleh */
+ case 0xb82: /* minstreth */
+ if (s->cur_xlen != 32)
+ goto invalid_csr;
+ val = s->insn_counter >> 32;
+ break;
+    case 0xf14: /* mhartid */
+ val = s->mhartid;
+ break;
+ default:
+ invalid_csr:
+#ifdef DUMP_INVALID_CSR
+ /* the 'time' counter is usually emulated */
+ if (csr != 0xc01 && csr != 0xc81) {
+ printf("csr_read: invalid CSR=0x%x\n", csr);
+ }
+#endif
+ *pval = 0;
+ return -1;
+ }
+ *pval = val;
+ return 0;
+}
+
+#if FLEN > 0
+static void set_frm(RISCVCPUState *s, unsigned int val)
+{
+ if (val >= 5)
+ val = 0;
+ s->frm = val;
+}
+
+/* return -1 if invalid rounding mode */
+static int get_insn_rm(RISCVCPUState *s, unsigned int rm)
+{
+ if (rm == 7)
+ return s->frm;
+ if (rm >= 5)
+ return -1;
+ else
+ return rm;
+}
+#endif
+
+/* return -1 if invalid CSR, 0 if OK, 1 if the interpreter loop must be
+ exited (e.g. XLEN was modified), 2 if TLBs have been flushed. */
+static int csr_write(RISCVCPUState *s, uint32_t csr, target_ulong val)
+{
+ target_ulong mask;
+
+#if defined(DUMP_CSR)
+ printf("csr_write: csr=0x%03x val=0x", csr);
+ print_target_ulong(val);
+ printf("\n");
+#endif
+ switch(csr) {
+#if FLEN > 0
+ case 0x001: /* fflags */
+ s->fflags = val & 0x1f;
+ s->fs = 3;
+ break;
+ case 0x002: /* frm */
+ set_frm(s, val & 7);
+ s->fs = 3;
+ break;
+ case 0x003: /* fcsr */
+ set_frm(s, (val >> 5) & 7);
+ s->fflags = val & 0x1f;
+ s->fs = 3;
+ break;
+#endif
+ case 0x100: /* sstatus */
+ set_mstatus(s, (s->mstatus & ~SSTATUS_MASK) | (val & SSTATUS_MASK));
+ break;
+ case 0x104: /* sie */
+ mask = s->mideleg;
+ s->mie = (s->mie & ~mask) | (val & mask);
+ break;
+    case 0x105: /* stvec */
+        s->stvec = val & ~3;
+        break;
+    case 0x106: /* scounteren */
+        s->scounteren = val & COUNTEREN_MASK;
+        break;
+    case 0x140: /* sscratch */
+        s->sscratch = val;
+        break;
+    case 0x141: /* sepc */
+        s->sepc = val & ~1;
+        break;
+    case 0x142: /* scause */
+        s->scause = val;
+        break;
+    case 0x143: /* stval */
+        s->stval = val;
+        break;
+    case 0x144: /* sip */
+        mask = s->mideleg;
+        s->mip = (s->mip & ~mask) | (val & mask);
+        break;
+    case 0x180: /* satp */
+ /* no ASID implemented */
+#if MAX_XLEN == 32
+ {
+ int new_mode;
+ new_mode = (val >> 31) & 1;
+ s->satp = (val & (((target_ulong)1 << 22) - 1)) |
+ (new_mode << 31);
+ }
+#else
+ {
+ int mode, new_mode;
+            mode = (s->satp >> 60) & 0xf;
+ new_mode = (val >> 60) & 0xf;
+ if (new_mode == 0 || (new_mode >= 8 && new_mode <= 9))
+ mode = new_mode;
+ s->satp = (val & (((uint64_t)1 << 44) - 1)) |
+ ((uint64_t)mode << 60);
+ }
+#endif
+ tlb_flush_all(s);
+ return 2;
+
+    case 0x300: /* mstatus */
+ set_mstatus(s, val);
+ break;
+ case 0x301: /* misa */
+#if MAX_XLEN >= 64
+ {
+ int new_mxl;
+ new_mxl = (val >> (s->cur_xlen - 2)) & 3;
+ if (new_mxl >= 1 && new_mxl <= get_base_from_xlen(MAX_XLEN)) {
+ /* Note: misa is only modified in M level, so cur_xlen
+ = 2^(mxl + 4) */
+ if (s->mxl != new_mxl) {
+ s->mxl = new_mxl;
+ s->cur_xlen = 1 << (new_mxl + 4);
+ return 1;
+ }
+ }
+ }
+#endif
+ break;
+    case 0x302: /* medeleg */
+        mask = (1 << (CAUSE_STORE_PAGE_FAULT + 1)) - 1;
+        s->medeleg = (s->medeleg & ~mask) | (val & mask);
+        break;
+    case 0x303: /* mideleg */
+        mask = MIP_SSIP | MIP_STIP | MIP_SEIP;
+        s->mideleg = (s->mideleg & ~mask) | (val & mask);
+        break;
+    case 0x304: /* mie */
+        mask = MIP_MSIP | MIP_MTIP | MIP_SSIP | MIP_STIP | MIP_SEIP;
+        s->mie = (s->mie & ~mask) | (val & mask);
+        break;
+    case 0x305: /* mtvec */
+        s->mtvec = val & ~3;
+        break;
+    case 0x306: /* mcounteren */
+        s->mcounteren = val & COUNTEREN_MASK;
+        break;
+    case 0x340: /* mscratch */
+        s->mscratch = val;
+        break;
+    case 0x341: /* mepc */
+        s->mepc = val & ~1;
+        break;
+    case 0x342: /* mcause */
+        s->mcause = val;
+        break;
+    case 0x343: /* mtval */
+        s->mtval = val;
+        break;
+    case 0x344: /* mip */
+        mask = MIP_SSIP | MIP_STIP;
+        s->mip = (s->mip & ~mask) | (val & mask);
+        break;
+ default:
+#ifdef DUMP_INVALID_CSR
+ printf("csr_write: invalid CSR=0x%x\n", csr);
+#endif
+ return -1;
+ }
+ return 0;
+}
+
+static void set_priv(RISCVCPUState *s, int priv)
+{
+ if (s->priv != priv) {
+ tlb_flush_all(s);
+#if MAX_XLEN >= 64
+ /* change the current xlen */
+ {
+ int mxl;
+ if (priv == PRV_S)
+ mxl = (s->mstatus >> MSTATUS_SXL_SHIFT) & 3;
+ else if (priv == PRV_U)
+ mxl = (s->mstatus >> MSTATUS_UXL_SHIFT) & 3;
+ else
+ mxl = s->mxl;
+ s->cur_xlen = 1 << (4 + mxl);
+ }
+#endif
+ s->priv = priv;
+ }
+}
+
+static void raise_exception2(RISCVCPUState *s, uint32_t cause,
+ target_ulong tval)
+{
+ BOOL deleg;
+ target_ulong causel;
+
+#if defined(DUMP_EXCEPTIONS) || defined(DUMP_MMU_EXCEPTIONS) || defined(DUMP_INTERRUPTS)
+ {
+ int flag;
+ flag = 0;
+#ifdef DUMP_MMU_EXCEPTIONS
+ if (cause == CAUSE_FAULT_FETCH ||
+ cause == CAUSE_FAULT_LOAD ||
+ cause == CAUSE_FAULT_STORE ||
+ cause == CAUSE_FETCH_PAGE_FAULT ||
+ cause == CAUSE_LOAD_PAGE_FAULT ||
+ cause == CAUSE_STORE_PAGE_FAULT)
+ flag = 1;
+#endif
+#ifdef DUMP_INTERRUPTS
+ flag |= (cause & CAUSE_INTERRUPT) != 0;
+#endif
+#ifdef DUMP_EXCEPTIONS
+        flag = (cause & CAUSE_INTERRUPT) == 0;
+ if (cause == CAUSE_SUPERVISOR_ECALL || cause == CAUSE_ILLEGAL_INSTRUCTION)
+ flag = 0;
+#endif
+ if (flag) {
+ log_printf("raise_exception: cause=0x%08x tval=0x", cause);
+#ifdef CONFIG_LOGFILE
+ fprint_target_ulong(log_file, tval);
+#else
+ print_target_ulong(tval);
+#endif
+ log_printf("\n");
+ dump_regs(s);
+ }
+ }
+#endif
+
+ if (s->priv <= PRV_S) {
+        /* delegate the exception to the supervisor privilege level */
+ if (cause & CAUSE_INTERRUPT)
+ deleg = (s->mideleg >> (cause & (MAX_XLEN - 1))) & 1;
+ else
+ deleg = (s->medeleg >> cause) & 1;
+ } else {
+ deleg = 0;
+ }
+
+ causel = cause & 0x7fffffff;
+ if (cause & CAUSE_INTERRUPT)
+ causel |= (target_ulong)1 << (s->cur_xlen - 1);
+
+ if (deleg) {
+ s->scause = causel;
+ s->sepc = s->pc;
+ s->stval = tval;
+ s->mstatus = (s->mstatus & ~MSTATUS_SPIE) |
+ (((s->mstatus >> s->priv) & 1) << MSTATUS_SPIE_SHIFT);
+ s->mstatus = (s->mstatus & ~MSTATUS_SPP) |
+ (s->priv << MSTATUS_SPP_SHIFT);
+ s->mstatus &= ~MSTATUS_SIE;
+ set_priv(s, PRV_S);
+ s->pc = s->stvec;
+ } else {
+ s->mcause = causel;
+ s->mepc = s->pc;
+ s->mtval = tval;
+ s->mstatus = (s->mstatus & ~MSTATUS_MPIE) |
+ (((s->mstatus >> s->priv) & 1) << MSTATUS_MPIE_SHIFT);
+ s->mstatus = (s->mstatus & ~MSTATUS_MPP) |
+ (s->priv << MSTATUS_MPP_SHIFT);
+ s->mstatus &= ~MSTATUS_MIE;
+ set_priv(s, PRV_M);
+ s->pc = s->mtvec;
+ }
+}
+
+static void raise_exception(RISCVCPUState *s, uint32_t cause)
+{
+ raise_exception2(s, cause, 0);
+}
+
+static void handle_sret(RISCVCPUState *s)
+{
+ int spp, spie;
+ spp = (s->mstatus >> MSTATUS_SPP_SHIFT) & 1;
+ /* set the IE state to previous IE state */
+ spie = (s->mstatus >> MSTATUS_SPIE_SHIFT) & 1;
+ s->mstatus = (s->mstatus & ~(1 << spp)) |
+ (spie << spp);
+ /* set SPIE to 1 */
+ s->mstatus |= MSTATUS_SPIE;
+ /* set SPP to U */
+ s->mstatus &= ~MSTATUS_SPP;
+ set_priv(s, spp);
+ s->pc = s->sepc;
+}
+
+static void handle_mret(RISCVCPUState *s)
+{
+ int mpp, mpie;
+ mpp = (s->mstatus >> MSTATUS_MPP_SHIFT) & 3;
+ /* set the IE state to previous IE state */
+ mpie = (s->mstatus >> MSTATUS_MPIE_SHIFT) & 1;
+ s->mstatus = (s->mstatus & ~(1 << mpp)) |
+ (mpie << mpp);
+ /* set MPIE to 1 */
+ s->mstatus |= MSTATUS_MPIE;
+ /* set MPP to U */
+ s->mstatus &= ~MSTATUS_MPP;
+ set_priv(s, mpp);
+ s->pc = s->mepc;
+}
+
+static inline uint32_t get_pending_irq_mask(RISCVCPUState *s)
+{
+ uint32_t pending_ints, enabled_ints;
+
+ pending_ints = s->mip & s->mie;
+ if (pending_ints == 0)
+ return 0;
+
+ enabled_ints = 0;
+ switch(s->priv) {
+ case PRV_M:
+ if (s->mstatus & MSTATUS_MIE)
+ enabled_ints = ~s->mideleg;
+ break;
+ case PRV_S:
+ enabled_ints = ~s->mideleg;
+ if (s->mstatus & MSTATUS_SIE)
+ enabled_ints |= s->mideleg;
+ break;
+ default:
+ case PRV_U:
+ enabled_ints = -1;
+ break;
+ }
+ return pending_ints & enabled_ints;
+}
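+
+/* In short: interrupts delegated to S-mode (bit set in mideleg) are taken
+   when running in U-mode, or in S-mode with SIE set; non-delegated
+   interrupts target M-mode and are taken when running below M-mode, or in
+   M-mode with MIE set. */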
+
+static __exception int raise_interrupt(RISCVCPUState *s)
+{
+ uint32_t mask;
+ int irq_num;
+
+ mask = get_pending_irq_mask(s);
+ if (mask == 0)
+ return 0;
+ irq_num = ctz32(mask);
+ raise_exception(s, irq_num | CAUSE_INTERRUPT);
+ return -1;
+}
+
+static inline int32_t sext(int32_t val, int n)
+{
+ return (val << (32 - n)) >> (32 - n);
+}
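+
+/* Note: sext() relies on arithmetic right shift of signed values (and on
+   left shifts that reach the sign bit), which ISO C does not guarantee but
+   the compilers targeted here implement in the expected two's-complement
+   way. */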
+
+static inline uint32_t get_field1(uint32_t val, int src_pos,
+ int dst_pos, int dst_pos_max)
+{
+ int mask;
+ assert(dst_pos_max >= dst_pos);
+ mask = ((1 << (dst_pos_max - dst_pos + 1)) - 1) << dst_pos;
+ if (dst_pos >= src_pos)
+ return (val << (dst_pos - src_pos)) & mask;
+ else
+ return (val >> (src_pos - dst_pos)) & mask;
+}
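+
+/* Example (informative): get_field1(insn, 25, 5, 10) takes insn[30:25] and
+   places it at bits [10:5] of the result, zeroing everything else - the
+   kind of bit relocation used when reassembling B/J-type immediates in the
+   interpreter template. */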
+
+#define XLEN 32
+#include "riscv_cpu_template.h"
+
+#if MAX_XLEN >= 64
+#define XLEN 64
+#include "riscv_cpu_template.h"
+#endif
+
+#if MAX_XLEN >= 128
+#define XLEN 128
+#include "riscv_cpu_template.h"
+#endif
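+
+/* Each inclusion of riscv_cpu_template.h above instantiates the interpreter
+   core for one register width, producing riscv_cpu_interp_x32() and, when
+   configured, riscv_cpu_interp_x64()/riscv_cpu_interp_x128() as used
+   below. */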
+
+static void glue(riscv_cpu_interp, MAX_XLEN)(RISCVCPUState *s, int n_cycles)
+{
+#ifdef USE_GLOBAL_STATE
+ s = &riscv_cpu_global_state;
+#endif
+ uint64_t timeout;
+
+ timeout = s->insn_counter + n_cycles;
+ while (!s->power_down_flag &&
+ (int)(timeout - s->insn_counter) > 0) {
+ n_cycles = timeout - s->insn_counter;
+ switch(s->cur_xlen) {
+ case 32:
+ riscv_cpu_interp_x32(s, n_cycles);
+ break;
+#if MAX_XLEN >= 64
+ case 64:
+ riscv_cpu_interp_x64(s, n_cycles);
+ break;
+#endif
+#if MAX_XLEN >= 128
+ case 128:
+ riscv_cpu_interp_x128(s, n_cycles);
+ break;
+#endif
+ default:
+ abort();
+ }
+ }
+}
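+
+/* The outer loop re-dispatches on cur_xlen because a write to misa (see
+   csr_write()) can change the register width mid-run, making the
+   XLEN-specialized inner interpreter return before the cycle budget is
+   exhausted. */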
+
+/* Note: the value is not accurate when called in riscv_cpu_interp() */
+static uint64_t glue(riscv_cpu_get_cycles, MAX_XLEN)(RISCVCPUState *s)
+{
+ return s->insn_counter;
+}
+
+static void glue(riscv_cpu_set_mip, MAX_XLEN)(RISCVCPUState *s, uint32_t mask)
+{
+ s->mip |= mask;
+ /* exit from power down if an interrupt is pending */
+ if (s->power_down_flag && (s->mip & s->mie) != 0)
+ s->power_down_flag = FALSE;
+}
+
+static void glue(riscv_cpu_reset_mip, MAX_XLEN)(RISCVCPUState *s, uint32_t mask)
+{
+ s->mip &= ~mask;
+}
+
+static uint32_t glue(riscv_cpu_get_mip, MAX_XLEN)(RISCVCPUState *s)
+{
+ return s->mip;
+}
+
+static BOOL glue(riscv_cpu_get_power_down, MAX_XLEN)(RISCVCPUState *s)
+{
+ return s->power_down_flag;
+}
+
+static RISCVCPUState *glue(riscv_cpu_init, MAX_XLEN)(PhysMemoryMap *mem_map)
+{
+ RISCVCPUState *s;
+
+#ifdef USE_GLOBAL_STATE
+ s = &riscv_cpu_global_state;
+#else
+ s = mallocz(sizeof(*s));
+#endif
+ s->common.class_ptr = &glue(riscv_cpu_class, MAX_XLEN);
+ s->mem_map = mem_map;
+ s->pc = 0x1000;
+ s->priv = PRV_M;
+ s->cur_xlen = MAX_XLEN;
+ s->mxl = get_base_from_xlen(MAX_XLEN);
+ s->mstatus = ((uint64_t)s->mxl << MSTATUS_UXL_SHIFT) |
+ ((uint64_t)s->mxl << MSTATUS_SXL_SHIFT);
+ s->misa |= MCPUID_SUPER | MCPUID_USER | MCPUID_I | MCPUID_M | MCPUID_A;
+#if FLEN >= 32
+ s->misa |= MCPUID_F;
+#endif
+#if FLEN >= 64
+ s->misa |= MCPUID_D;
+#endif
+#if FLEN >= 128
+ s->misa |= MCPUID_Q;
+#endif
+#ifdef CONFIG_EXT_C
+ s->misa |= MCPUID_C;
+#endif
+ tlb_init(s);
+ return s;
+}
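+
+/* misa encodes one bit per extension letter (bit 0 = 'A', bit 8 = 'I',
+   bit 12 = 'M', ...); the MCPUID_* masks presumably follow that
+   (1 << (letter - 'A')) convention, so the reset value above advertises an
+   RV<XLEN> IMA core with S and U modes, plus F/D/Q/C when configured. */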
+
+static void glue(riscv_cpu_end, MAX_XLEN)(RISCVCPUState *s)
+{
+    /* only heap-allocated state may be freed; with USE_GLOBAL_STATE the
+       CPU state is a static object */
+#ifndef USE_GLOBAL_STATE
+    free(s);
+#endif
+}
+
+static uint32_t glue(riscv_cpu_get_misa, MAX_XLEN)(RISCVCPUState *s)
+{
+ return s->misa;
+}
+
+const RISCVCPUClass glue(riscv_cpu_class, MAX_XLEN) = {
+ glue(riscv_cpu_init, MAX_XLEN),
+ glue(riscv_cpu_end, MAX_XLEN),
+ glue(riscv_cpu_interp, MAX_XLEN),
+ glue(riscv_cpu_get_cycles, MAX_XLEN),
+ glue(riscv_cpu_set_mip, MAX_XLEN),
+ glue(riscv_cpu_reset_mip, MAX_XLEN),
+ glue(riscv_cpu_get_mip, MAX_XLEN),
+ glue(riscv_cpu_get_power_down, MAX_XLEN),
+ glue(riscv_cpu_get_misa, MAX_XLEN),
+ glue(riscv_cpu_flush_tlb_write_range_ram, MAX_XLEN),
+};
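+
+/* glue(x, y) token-pastes its arguments after macro expansion, so for
+   MAX_XLEN == 64 the class above is riscv_cpu_class64 and its methods are
+   riscv_cpu_init64, riscv_cpu_interp64, etc. */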
+
+#if CONFIG_RISCV_MAX_XLEN == MAX_XLEN
+RISCVCPUState *riscv_cpu_init(PhysMemoryMap *mem_map, int max_xlen)
+{
+ const RISCVCPUClass *c;
+ switch(max_xlen) {
+ /* with emscripten we compile a single CPU */
+#if defined(EMSCRIPTEN)
+ case MAX_XLEN:
+ c = &glue(riscv_cpu_class, MAX_XLEN);
+ break;
+#else
+ case 32:
+ c = &riscv_cpu_class32;
+ break;
+ case 64:
+ c = &riscv_cpu_class64;
+ break;
+#if CONFIG_RISCV_MAX_XLEN == 128
+ case 128:
+ c = &riscv_cpu_class128;
+ break;
+#endif
+#endif /* !EMSCRIPTEN */
+ default:
+ return NULL;
+ }
+ return c->riscv_cpu_init(mem_map);
+}
+#endif /* CONFIG_RISCV_MAX_XLEN == MAX_XLEN */
+