diff --git a/Makefile b/Makefile index 7068968f4..5e8321431 100644 --- a/Makefile +++ b/Makefile @@ -87,6 +87,7 @@ SAIL_VM_SRCS += riscv_vmem_common.sail SAIL_VM_SRCS += riscv_vmem_pte.sail SAIL_VM_SRCS += riscv_vmem_ptw.sail SAIL_VM_SRCS += riscv_vmem_tlb.sail +SAIL_VM_SRCS += riscv_vmem_translate.sail SAIL_VM_SRCS += riscv_vmem.sail # Non-instruction sources diff --git a/model/prelude.sail b/model/prelude.sail index 028af2126..50c2ea857 100644 --- a/model/prelude.sail +++ b/model/prelude.sail @@ -10,6 +10,7 @@ default Order dec $include $include +$include $include $include $include "mapping.sail" diff --git a/model/riscv_addr_checks.sail b/model/riscv_addr_checks.sail index f9c783574..38a7316d2 100644 --- a/model/riscv_addr_checks.sail +++ b/model/riscv_addr_checks.sail @@ -54,7 +54,7 @@ type ext_data_addr_error = unit /* Default data addr is just base register + immediate offset (may be zero). Extensions might override and add additional checks. */ -function ext_data_get_addr(base : regidx, offset : xlenbits, acc : AccessType(ext_access_type), width : word_width) +function ext_data_get_addr(base : regidx, offset : xlenbits, acc : AccessType(ext_access_type), width : range(0, max_mem_access)) -> Ext_DataAddr_Check(ext_data_addr_error) = let addr = X(base) + offset in Ext_DataAddr_OK(addr) diff --git a/model/riscv_insts_base.sail b/model/riscv_insts_base.sail index 9c4630b33..9980c7894 100644 --- a/model/riscv_insts_base.sail +++ b/model/riscv_insts_base.sail @@ -300,52 +300,48 @@ union clause ast = LOAD : (bits(12), regidx, regidx, bool, word_width, bool, boo mapping clause encdec = LOAD(imm, rs1, rd, is_unsigned, size, false, false) if (size_bytes(size) < sizeof(xlen_bytes)) | (not(is_unsigned) & size_bytes(size) <= sizeof(xlen_bytes)) <-> imm @ rs1 @ bool_bits(is_unsigned) @ size_enc(size) @ rd @ 0b0000011 if (size_bytes(size) < sizeof(xlen_bytes)) | (not(is_unsigned) & size_bytes(size) <= sizeof(xlen_bytes)) -val extend_value : forall 'n, 0 < 'n <= xlen_bytes. (bool, MemoryOpResult(bits(8 * 'n))) -> MemoryOpResult(xlenbits) -function extend_value(is_unsigned, value) = match (value) { - MemValue(v) => MemValue(if is_unsigned then zero_extend(v) else sign_extend(v) : xlenbits), - MemException(e) => MemException(e) -} +val extend_value : forall 'n, 0 < 'n <= xlen. (bool, bits('n)) -> xlenbits +function extend_value(is_unsigned, value) = if is_unsigned then zero_extend(value) else sign_extend(value) + +val process_load : forall 'n, 0 < 'n <= xlen_bytes. (regidx, result(bits(8 * 'n), VMemException), bool) -> Retired +function process_load(rd, value, is_unsigned) = + match value { + Ok(result) => { X(rd) = extend_value(is_unsigned, result); RETIRE_SUCCESS }, + Err(vaddr, e) => { handle_mem_exception(vaddr, e); RETIRE_FAIL } + } -val process_load : forall 'n, 0 < 'n <= xlen_bytes. (regidx, xlenbits, MemoryOpResult(bits(8 * 'n)), bool) -> Retired -function process_load(rd, vaddr, value, is_unsigned) = - match extend_value(is_unsigned, value) { - MemValue(result) => { X(rd) = result; RETIRE_SUCCESS }, - MemException(e) => { handle_mem_exception(vaddr, e); RETIRE_FAIL } +function is_aligned(vaddr : xlenbits, width : word_width) -> bool = + match width { + BYTE => true, + HALF => vaddr[0..0] == 0b0, + WORD => vaddr[1..0] == 0b00, + DOUBLE => vaddr[2..0] == 0b000 } +// Return true if the address is misaligned and we don't support misaligned access. 
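+// For example (hypothetical values, with plat_enable_misaligned_access() false):
+// a WORD access at a vaddr ending in 0b10 is reported as misaligned, while one
+// ending in 0b00 is not.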
function check_misaligned(vaddr : xlenbits, width : word_width) -> bool = - if plat_enable_misaligned_access() then false - else match width { - BYTE => false, - HALF => vaddr[0] == bitone, - WORD => vaddr[0] == bitone | vaddr[1] == bitone, - DOUBLE => vaddr[0] == bitone | vaddr[1] == bitone | vaddr[2] == bitone - } + not(plat_enable_misaligned_access()) & not(is_aligned(vaddr, width)) function clause execute(LOAD(imm, rs1, rd, is_unsigned, width, aq, rl)) = { let offset : xlenbits = sign_extend(imm); + let width_bytes = size_bytes(width); + + // This is checked during decoding. + assert(width_bytes <= sizeof(xlen_bytes)); + /* Get the address, X(rs1) + offset. Some extensions perform additional checks on address validity. */ - match ext_data_get_addr(rs1, offset, Read(Data), width) { + match ext_data_get_addr(rs1, offset, Read(Data), width_bytes) { Ext_DataAddr_Error(e) => { ext_handle_data_check_error(e); RETIRE_FAIL }, - Ext_DataAddr_OK(vaddr) => + Ext_DataAddr_OK(vaddr) => { + match check_and_handle_load_vaddr_for_triggers(vaddr, get_arch_pc()) { + Some(ret) => return ret, + _ => () + }; if check_misaligned(vaddr, width) then { handle_mem_exception(vaddr, E_Load_Addr_Align()); RETIRE_FAIL } - else match translateAddr(vaddr, Read(Data)) { - TR_Failure(e, _) => { handle_mem_exception(vaddr, e); RETIRE_FAIL }, - TR_Address(paddr, _) => - match (width) { - BYTE => - process_load(rd, vaddr, mem_read(Read(Data), paddr, 1, aq, rl, false), is_unsigned), - HALF => - process_load(rd, vaddr, mem_read(Read(Data), paddr, 2, aq, rl, false), is_unsigned), - WORD => - process_load(rd, vaddr, mem_read(Read(Data), paddr, 4, aq, rl, false), is_unsigned), - DOUBLE if sizeof(xlen) >= 64 => - process_load(rd, vaddr, mem_read(Read(Data), paddr, 8, aq, rl, false), is_unsigned), - _ => report_invalid_width(__FILE__, __LINE__, width, "load") - } - } + else process_load(rd, vmem_read(Read(Data), vaddr, width_bytes, aq, rl, false), is_unsigned) + } } } @@ -380,43 +376,34 @@ mapping clause encdec = STORE(imm7 @ imm5, rs2, rs1, size, false, false) This may need revisiting. */ function clause execute (STORE(imm, rs2, rs1, width, aq, rl)) = { let offset : xlenbits = sign_extend(imm); + let width_bytes = size_bytes(width); + + // This is checked during decoding. + assert(width_bytes <= sizeof(xlen_bytes)); + /* Get the address, X(rs1) + offset. Some extensions perform additional checks on address validity. 
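 (For example, a capability extension such as CHERI could bounds-check the
 address here and return Ext_DataAddr_Error on failure.)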
*/ - match ext_data_get_addr(rs1, offset, Write(Data), width) { + match ext_data_get_addr(rs1, offset, Write(Data), width_bytes) { Ext_DataAddr_Error(e) => { ext_handle_data_check_error(e); RETIRE_FAIL }, - Ext_DataAddr_OK(vaddr) => + Ext_DataAddr_OK(vaddr) => { + match check_and_handle_store_vaddr_for_triggers(vaddr, get_arch_pc()) { + Some(ret) => return ret, + None() => () + }; if check_misaligned(vaddr, width) then { handle_mem_exception(vaddr, E_SAMO_Addr_Align()); RETIRE_FAIL } - else match translateAddr(vaddr, Write(Data)) { - TR_Failure(e, _) => { handle_mem_exception(vaddr, e); RETIRE_FAIL }, - TR_Address(paddr, _) => { - let eares : MemoryOpResult(unit) = match width { - BYTE => mem_write_ea(paddr, 1, aq, rl, false), - HALF => mem_write_ea(paddr, 2, aq, rl, false), - WORD => mem_write_ea(paddr, 4, aq, rl, false), - DOUBLE => mem_write_ea(paddr, 8, aq, rl, false) - }; - match (eares) { - MemException(e) => { handle_mem_exception(vaddr, e); RETIRE_FAIL }, - MemValue(_) => { - let rs2_val = X(rs2); - let res : MemoryOpResult(bool) = match (width) { - BYTE => mem_write_value(paddr, 1, rs2_val[7..0], aq, rl, false), - HALF => mem_write_value(paddr, 2, rs2_val[15..0], aq, rl, false), - WORD => mem_write_value(paddr, 4, rs2_val[31..0], aq, rl, false), - DOUBLE if sizeof(xlen) >= 64 - => mem_write_value(paddr, 8, rs2_val, aq, rl, false), - _ => report_invalid_width(__FILE__, __LINE__, width, "store"), - }; - match (res) { - MemValue(true) => RETIRE_SUCCESS, - MemValue(false) => internal_error(__FILE__, __LINE__, "store got false from mem_write_value"), - MemException(e) => { handle_mem_exception(vaddr, e); RETIRE_FAIL } - } - } + else match vmem_write_translate(vaddr, width_bytes, Write(Data), aq, rl, false) { + Err(vaddr, e) => { handle_mem_exception(vaddr, e); RETIRE_FAIL }, + Ok(state) => { + let rs2_val = X(rs2); + match vmem_write_value(state, rs2_val[width_bytes * 8 - 1 .. 0]) { + Ok(true) => RETIRE_SUCCESS, + Ok(false) => internal_error(__FILE__, __LINE__, "store got false from mem_write_value"), + Err(vaddr, e) => { handle_mem_exception(vaddr, e); RETIRE_FAIL } } } } + } } } diff --git a/model/riscv_insts_fext.sail b/model/riscv_insts_fext.sail index e4afb30d5..3e84d2e59 100644 --- a/model/riscv_insts_fext.sail +++ b/model/riscv_insts_fext.sail @@ -282,60 +282,29 @@ mapping clause encdec = LOAD_FP(imm, rs1, rd, DOUBLE) if haveDExt() /* Execution semantics ================================ */ -val process_fload64 : (regidx, xlenbits, MemoryOpResult(bits(64))) - -> Retired -function process_fload64(rd, addr, value) = - if sizeof(flen) == 64 - then match value { - MemValue(result) => { F(rd) = result; RETIRE_SUCCESS }, - MemException(e) => { handle_mem_exception(addr, e); RETIRE_FAIL } - } - else { - /* should not get here */ - RETIRE_FAIL - } - -val process_fload32 : (regidx, xlenbits, MemoryOpResult(bits(32))) - -> Retired -function process_fload32(rd, addr, value) = - match value { - MemValue(result) => { F(rd) = nan_box(result); RETIRE_SUCCESS }, - MemException(e) => { handle_mem_exception(addr, e); RETIRE_FAIL } - } +function clause execute(LOAD_FP(imm, rs1, rd, width)) = { + let width_bytes = word_width_bytes(width); -val process_fload16 : (regidx, xlenbits, MemoryOpResult(bits(16))) - -> Retired -function process_fload16(rd, addr, value) = - match value { - MemValue(result) => { F(rd) = nan_box(result); RETIRE_SUCCESS }, - MemException(e) => { handle_mem_exception(addr, e); RETIRE_FAIL } - } + // This is checked during decoding. 
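+ // (The encdec clauses above only accept widths the FP extension supports,
+ // e.g. DOUBLE requires haveDExt(), so the value always fits an FP register.)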
+ assert(width_bytes <= sizeof(flen_bytes));
-function clause execute(LOAD_FP(imm, rs1, rd, width)) = {
 let offset : xlenbits = sign_extend(imm);
 /* Get the address, X(rs1) + offset.
 Some extensions perform additional checks on address validity.
 */
- match ext_data_get_addr(rs1, offset, Read(Data), width) {
+ match ext_data_get_addr(rs1, offset, Read(Data), width_bytes) {
 Ext_DataAddr_Error(e) => { ext_handle_data_check_error(e); RETIRE_FAIL },
- Ext_DataAddr_OK(vaddr) =>
+ Ext_DataAddr_OK(vaddr) => {
+ match check_and_handle_load_vaddr_for_triggers(vaddr, get_arch_pc()) {
+ Some(ret) => return ret,
+ None() => ()
+ };
 if check_misaligned(vaddr, width)
 then { handle_mem_exception(vaddr, E_Load_Addr_Align()); RETIRE_FAIL }
- else match translateAddr(vaddr, Read(Data)) {
- TR_Failure(e, _) => { handle_mem_exception(vaddr, e); RETIRE_FAIL },
- TR_Address(addr, _) => {
- let (aq, rl, res) = (false, false, false);
- match (width) {
- BYTE => { handle_illegal(); RETIRE_FAIL },
- HALF =>
- process_fload16(rd, vaddr, mem_read(Read(Data), addr, 2, aq, rl, res)),
- WORD =>
- process_fload32(rd, vaddr, mem_read(Read(Data), addr, 4, aq, rl, res)),
- DOUBLE if sizeof(flen) >= 64 =>
- process_fload64(rd, vaddr, mem_read(Read(Data), addr, 8, aq, rl, res)),
- _ => report_invalid_width(__FILE__, __LINE__, width, "floating point load"),
- }
- }
+ else match vmem_read(Read(Data), vaddr, width_bytes, false, false, false) {
+ Ok(result) => { F(rd) = nan_box(result); RETIRE_SUCCESS },
+ Err(vaddr, e) => { handle_mem_exception(vaddr, e); RETIRE_FAIL },
 }
+ }
 }
}
@@ -368,49 +337,32 @@ mapping clause encdec = STORE_FP(imm7 @ imm5, rs2, rs1, DOUBLE)
 /* Execution semantics ================================ */
-val process_fstore : (xlenbits, MemoryOpResult(bool)) -> Retired
-function process_fstore(vaddr, value) =
- match value {
- MemValue(true) => { RETIRE_SUCCESS },
- MemValue(false) => { internal_error(__FILE__, __LINE__, "store got false from mem_write_value") },
- MemException(e) => { handle_mem_exception(vaddr, e); RETIRE_FAIL }
- }
-
 function clause execute (STORE_FP(imm, rs2, rs1, width)) = {
+ let width_bytes = word_width_bytes(width);
+
+ // This is checked during decoding.
+ assert(width_bytes <= sizeof(flen_bytes));
+
 let offset : xlenbits = sign_extend(imm);
- let (aq, rl, con) = (false, false, false);
 /* Get the address, X(rs1) + offset.
 Some extensions perform additional checks on address validity.
*/ - match ext_data_get_addr(rs1, offset, Write(Data), width) { + match ext_data_get_addr(rs1, offset, Write(Data), width_bytes) { Ext_DataAddr_Error(e) => { ext_handle_data_check_error(e); RETIRE_FAIL }, - Ext_DataAddr_OK(vaddr) => + Ext_DataAddr_OK(vaddr) => { if check_misaligned(vaddr, width) then { handle_mem_exception(vaddr, E_SAMO_Addr_Align()); RETIRE_FAIL } - else match translateAddr(vaddr, Write(Data)) { - TR_Failure(e, _) => { handle_mem_exception(vaddr, e); RETIRE_FAIL }, - TR_Address(addr, _) => { - let eares : MemoryOpResult(unit) = match width { - BYTE => MemValue () /* bogus placeholder for illegal size */, - HALF => mem_write_ea(addr, 2, aq, rl, false), - WORD => mem_write_ea(addr, 4, aq, rl, false), - DOUBLE => mem_write_ea(addr, 8, aq, rl, false) - }; - match (eares) { - MemException(e) => { handle_mem_exception(vaddr, e); RETIRE_FAIL }, - MemValue(_) => { - let rs2_val = F(rs2); - match (width) { - BYTE => { handle_illegal(); RETIRE_FAIL }, - HALF => process_fstore (vaddr, mem_write_value(addr, 2, rs2_val[15..0], aq, rl, con)), - WORD => process_fstore (vaddr, mem_write_value(addr, 4, rs2_val[31..0], aq, rl, con)), - DOUBLE if sizeof(flen) >= 64 => - process_fstore (vaddr, mem_write_value(addr, 8, rs2_val, aq, rl, con)), - _ => report_invalid_width(__FILE__, __LINE__, width, "floating point store"), - }; - } + else match vmem_write_translate(vaddr, width_bytes, Write(Data), false, false, false) { + Err(vaddr, e) => { handle_mem_exception(vaddr, e); RETIRE_FAIL }, + Ok(state) => { + let rs2_val = F(rs2); + match vmem_write_value(state, rs2_val[width_bytes * 8 - 1 .. 0]) { + Ok(true) => { RETIRE_SUCCESS }, + Ok(false) => { internal_error(__FILE__, __LINE__, "store got false from mem_write_value") }, + Err(vaddr, e) => { handle_mem_exception(vaddr, e); RETIRE_FAIL } } } } + } } } diff --git a/model/riscv_vmem.sail b/model/riscv_vmem.sail index 2d1e5d162..effe8ad2a 100644 --- a/model/riscv_vmem.sail +++ b/model/riscv_vmem.sail @@ -6,447 +6,249 @@ /* SPDX-License-Identifier: BSD-2-Clause */ /*=======================================================================================*/ -// **************************************************************** -// Virtual memory address translation and memory protection, -// including PTWs (Page Table Walks) and TLBs (Translation Look-aside Buffers) -// Supported VM modes: Sv32, Sv39, Sv48. TODO: Sv57 - -// STYLE NOTES: -// PRIVATE items are used only within this VM code. -// PUBLIC items are invoked from other parts of sail-riscv. - -// TLB NOTE: -// TLBs are not part of the RISC-V architecture specification. -// However, we model a simple TLB so that -// (1) we can meaningfully test SFENCE.VMA which is a no-op wihout TLBs; -// (2) we can greatly speed up simulation speed -// (e.g., from 10s or minutes to few minutes for Linux boot) -// The TLB implementation is in a separate file: riscv_vmem_tlb.sail -// The code in this file is structured and commented so you can easily -// ignore TLB functionality at first reading. 
- -// **************************************************************** -// Fields of virtual addresses - -// PRIVATE: Extract full VPN field from VA -function vpns_of_va(sv_params : SV_Params, - va : bits(64)) -> bits(64) = { - let mask : bits(64) = zero_extend(ones(sv_params.va_size_bits)); - (va & mask) >> pagesize_bits -} - -// PRIVATE: Extract VPN[level] from VA -function vpn_j_of_va(sv_params : SV_Params, - va : bits(64), - level : PTW_Level) -> bits(64) = { - let lsb : range(0,63) = pagesize_bits + level * sv_params.vpn_size_bits; - assert (lsb < sizeof(xlen)); - let mask : bits(64) = zero_extend(ones(sv_params.vpn_size_bits)); - ((va >> lsb) & mask) +// Exponent of the page size. 2^12 = 4096 bytes. +type page_size_exp : Int = 12 +let page_size_exp = sizeof(page_size_exp) +type page_size : Int = 2 ^ page_size_exp +let page_size = sizeof(page_size) + +// Exception when accessing an address, together with the offending virtual address. +type VMemException = (xlenbits, ExceptionType) + +// An address, plus an optional second address. This is used for address +// translation of accesses that may span page boundaries. It's not possible +// to span more than one page boundary so we only need a max of two addresses. +type xlenbits2 = (xlenbits, option(xlenbits)) + +// For an access that may be split across a page boundary, returns the width +// of each half. If it isn't split the second value will be 0. +val splitAccessWidths : forall 'w, 0 <= 'w <= max_mem_access . (xlenbits, int('w)) -> + {'w0 'w1, 'w0 >= 0 & 'w1 >= 0 & 'w0 + 'w1 == 'w . (int('w0), int('w1))} + +function splitAccessWidths(addr, width) = { + let offset_in_page = unsigned(addr[page_size_exp - 1 .. 0]); + let width0 = page_size - offset_in_page; + let width0 : range(0, 'w) = if width0 <= width then width0 else width; + let width1 = width - width0; + (width0, width1) } -// PRIVATE: Extract offset within page from VA -function offset_of_va(va : bits(64)) -> bits(PAGESIZE_BITS) = va[pagesize_bits - 1 .. 0] - -// Valid xlen-wide values containing virtual addrs must have upper -// bits equal to the MSB of the virtual address. -// Virtual address widths depend on the virtual memory mode. -// PRIVATE -function is_valid_vAddr(struct { va_size_bits, _ } : SV_Params, - vAddr : bits(64)) -> bool = - vAddr == sign_extend(vAddr[va_size_bits - 1 .. 0]) - -// **************************************************************** -// Results of Page Table Walk (PTW) +// Translate virtual to physical address(es) for a multi-byte memory access. +// Because the access may be across a page boundary this may result in multiple +// physical addresses. If that is the case, the second physical address will +// point to the start of a page. +function translateRange( + vaddr : xlenbits, + width : range(0, max_mem_access), + typ : AccessType(ext_access_type), +) -> result((xlenbits2, ext_ptw), VMemException) = { + // Widths of each access. + let (width0, width1) = splitAccessWidths(vaddr, width); + assert(width0 + width1 == width); + + // Translate the first address, return failure if it fails. + let (paddr0, ext0) : (xlenbits, ext_ptw) = match translateAddr(vaddr, typ) { + TR_Address(paddr) => paddr, + TR_Failure(exc) => return Err(vaddr, exc), + }; -// PRIVATE -union PTW_Result = { - PTW_Success: (bits(64), bits(64), bits(64), nat, bool, ext_ptw), - PTW_Failure: (PTW_Error, ext_ptw) + // If the second access exists then we need to translate its address. 
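+ // For example (assuming the 4 KiB pages defined above): a 4-byte access at a
+ // page offset of 0xFFE gets width0 == 2 (the bytes left in this page) and
+ // width1 == 2 (the bytes in the next page), so a second translation is needed.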
+ let paddr1 = if width1 != 0 then { + // Second address to translate is at the start of the next page (wrapped). + let vaddr1 = vaddr + width0; + let (paddr1, _ext1) : (xlenbits, ext_ptw) = match translateAddr(vaddr1, typ) { + TR_Address(paddr) => paddr, + TR_Failure(exc) => return Err(vaddr1, exc), + }; + // ext1 is discarded currently. It is only used for CHERI and I have + // not investigated how it should work in this case. + + // If the pages are contiguous we don't need to split the access. + if paddr1 - paddr0 != vaddr1 - vaddr then Some(paddr1) else None() + } else { + None() + }; + Ok((paddr0, paddr1), ext0) } -// **************************************************************** -// Page Table Walk (PTW) - -// Note: 'pt_walk()' is recursive => needs separate 'val' decls - -// PRIVATE -val pt_walk : (SV_Params, - bits(64), // virtual addr - AccessType(ext_access_type), // Read/Write/ReadWrite/Execute - Privilege, // User/Supervisor/Machine - bool, // mstatus.MXR - bool, // do_sum - bits(64), // PT base addr - PTW_Level, // tree level for this recursive call - bool, // global translation, - ext_ptw) // ext_ptw - -> PTW_Result - -function pt_walk(sv_params, - va, - ac, - priv, - mxr, - do_sum, - pt_base, - level, - global, - ext_ptw) = { - let vpn_j = vpn_j_of_va(sv_params, va, level); - let pte_offset = vpn_j << sv_params.log_pte_size_bytes; - let pte_addr = pt_base + pte_offset; - - // In Sv32, physical addrs are actually 34 bits, not XLEN(=32) bits. - // Below, 'pte_phys_addr' is XLEN bits because it's an arg to - // 'mem_read_priv()' [riscv_mem.sail] where it's declared as xlenbits. - // That def and this use need to be fixed together (TODO) - let pte_phys_addr : xlenbits = pte_addr[(sizeof(xlen) - 1) .. 0]; - - // Read this-level PTE from mem - let mem_result = mem_read_priv(Read(Data), // AccessType - Supervisor, // Privilege - pte_phys_addr, - 8, // atom (8) - false, // aq - false, // rl - false); // res - - match mem_result { - MemException(_) => PTW_Failure(PTW_Access(), ext_ptw), - MemValue(pte) => { - let pte_flags = Mk_PTE_Flags(pte[7 .. 0]); - if pte_is_invalid(pte_flags) then - PTW_Failure(PTW_Invalid_PTE(), ext_ptw) - else { - let ppns : bits(64) = PPNs_of_PTE(sv_params, pte); - let global' = global | (pte_flags[G] == 0b1); - if pte_is_ptr(pte_flags) then { - // Non-Leaf PTE - if level > 0 then { - // follow the pointer to walk next level - let pt_base' : bits(64) = ppns << pagesize_bits; - let level' = level - 1; - pt_walk(sv_params, va, ac, priv, mxr, do_sum, - pt_base', level', global', ext_ptw) - } - else - // level 0 PTE, but contains a pointer instead of a leaf - PTW_Failure(PTW_Invalid_PTE(), ext_ptw) - } - else { - // Leaf PTE - let ext_pte = msbs_of_PTE(sv_params, pte); - let pte_check = check_PTE_permission(ac, priv, mxr, do_sum, pte_flags, - ext_pte, ext_ptw); - match pte_check { - PTE_Check_Failure(ext_ptw, ext_ptw_fail) => - PTW_Failure(ext_get_ptw_error(ext_ptw_fail), ext_ptw), - PTE_Check_Success(ext_ptw) => - if level > 0 then { - // Superpage; construct mask for lower-level PPNs from the PTE - let mask_bits = level * sv_params.pte_PPN_j_size_bits; - // Clear the lowest `mask_bits` bits. 
- let ppns_masked = (ppns >> mask_bits) << mask_bits; - if not(ppns == ppns_masked) then - // misaligned superpage mapping - PTW_Failure(PTW_Misaligned(), ext_ptw) - else { - // Compose final PA in superpage: - // Superpage PPN + lower VPNs + pagesize_bits page-offset - let mask : bits(64) = ~ (ones() << mask_bits); - let ppn = ppns | (vpns_of_va(sv_params, va) & mask); - let pa = (ppn << pagesize_bits) | zero_extend(offset_of_va(va)); - PTW_Success(pa, pte, pte_addr, level, global', ext_ptw) - } - } - else { - let pa = (ppns << pagesize_bits) | zero_extend(offset_of_va(va)); - PTW_Success(pa, pte, pte_addr, level, global', ext_ptw) - } - } - } +// Read memory but potentially using two accesses if `paddrs` contains two +// entries. The results are then combined back into a single value. This is +// needed when reading across a page boundary. +function vmem_read_priv_meta forall 'n, 0 < 'n < max_mem_access . ( + typ : AccessType(ext_access_type), + priv : Privilege, + vaddr : xlenbits, + width : int('n), + aq : bool, + rel : bool, + res : bool, + meta : bool, +) -> result((bits(8 * 'n), mem_meta), VMemException) = { + match translateRange(vaddr, width, typ) { + Err(e) => Err(e), + Ok(paddrs, ext) => { + match paddrs { + (paddr0, None()) => match checked_mem_read(typ, priv, paddr0, width, aq, rel, res, meta) { + MemValue(v, meta) => Ok(v, meta), + MemException(e) => Err(vaddr, e), + }, + (paddr0, Some(paddr1)) => { + let (width0 as int('w0), width1 as int('w1)) = splitAccessWidths(paddr0, width); + assert(width0 > 0 & width1 > 0); + + let (val0, meta0) : (bits(8 * 'w0), mem_meta) = match checked_mem_read(typ, priv, paddr0, width0, aq, rel, res, meta) { + MemValue(v, meta) => (v, meta), + MemException(e) => return Err(vaddr, e), + }; + let (val1, meta1) : (bits(8 * 'w1), mem_meta) = match checked_mem_read(typ, priv, paddr1, width1, aq, rel, res, meta) { + MemValue(v, meta) => (v, meta), + MemException(e) => return Err(vaddr + width0, e), + }; + // TODO: How should we combine meta0 and meta1? + Ok((val1 @ val0, meta0)) + }, } - } + }, } } -// **************************************************************** -// Architectural SATP CSR - -// PUBLIC: see also riscv_insts_zicsr.sail and other CSR-related files -register satp : xlenbits - -// See riscv_sys_regs.sail for legalize_satp{32,64}(). -// WARNING: those functions legalize Mode but not ASID? -// PUBLIC: invoked from writeCSR() to fixup WARL fields -function legalize_satp(a : Architecture, - o : xlenbits, // previous value of satp - v : xlenbits) // proposed new value of satp - -> xlenbits = { // new legal value of satp - if sizeof(xlen) == 32 then { - // The slice and extend ops below are no-ops when xlen==32, - // but appease the type-checker when xlen==64 (when this code is not executed!) - let o32 : bits(32) = o[31 .. 0]; - let v32 : bits(32) = v[31 .. 0]; - let new_satp : bits(32) = legalize_satp32(a, o32, v32); - zero_extend(new_satp); - } else if sizeof(xlen) == 64 then { - // The extend and truncate ops below are no-ops when xlen==64, - // but appease the type-checker when xlen==32 (when this code is not executed!) 
- let o64 : bits(64) = zero_extend(o); - let v64 : bits(64) = zero_extend(v); - let new_satp : bits(64) = legalize_satp64(a, o64, v64); - truncate(new_satp, sizeof(xlen)) - } else - internal_error(__FILE__, __LINE__, "Unsupported xlen" ^ dec_str(sizeof(xlen))) +struct _WriteIntermediateState('n) = { + vaddr : xlenbits, + paddrs : xlenbits2, + width : int('n), + typ : AccessType(ext_access_type), + aq : bool, + rl : bool, + con : bool, } -// ---------------- -// Fields of SATP - -// ASID is 9b in Sv32, 16b in Sv39/Sv48/Sv57: we use 16b for both -// PRIVATE -function satp_to_asid(satp_val : xlenbits) -> asidbits = - if sizeof(xlen) == 32 then zero_extend(Mk_Satp32(satp_val)[Asid]) - else if sizeof(xlen) == 64 then Mk_Satp64(satp_val)[Asid] - else internal_error(__FILE__, __LINE__, - "Unsupported xlen" ^ dec_str(sizeof(xlen))) - -// Result is 64b to cover both RV32 and RV64 addrs -// PRIVATE -function satp_to_PT_base(satp_val : xlenbits) -> bits(64) = { - let ppn = if sizeof(xlen) == 32 then zero_extend (64, Mk_Satp32(satp_val)[PPN]) - else if sizeof(xlen) == 64 then zero_extend (64, Mk_Satp64(satp_val)[PPN]) - else internal_error(__FILE__, __LINE__, - "Unsupported xlen" ^ dec_str(sizeof(xlen))); - ppn << pagesize_bits -} - -// Compute address translation mode from SATP register -// PRIVATE -function translationMode(priv : Privilege) -> SATPMode = { - if priv == Machine then - Sbare - else if sizeof(xlen) == 32 then - match Mk_Satp32(satp)[Mode] { - 0b0 => Sbare, - 0b1 => Sv32 - } - else if sizeof(xlen) == 64 then { - // Translation mode is based on mstatus.SXL, which could be RV32 when xlen==64 - let arch = architecture(get_mstatus_SXL(mstatus)); - match arch { - Some(RV64) => { let mbits : bits(4) = satp[63 .. 60]; - match satp64Mode_of_bits(RV64, mbits) { // see riscv_types.sail - Some(m) => m, - None() => internal_error(__FILE__, __LINE__, - "invalid RV64 translation mode in satp") - } - }, - Some(RV32) => match Mk_Satp32(satp[31 .. 0])[Mode] { // Note: satp is 64bits here - // When xlen is 64, mstatus.SXL (for S privilege) can be RV32 - 0b0 => Sbare, - 0b1 => Sv32 - }, - _ => internal_error(__FILE__, __LINE__, "unsupported address translation arch") - } - } - else - internal_error(__FILE__, __LINE__, "unsupported xlen") -} - -// **************************************************************** -// VA to PA translation - -// Result of address translation - -// PUBLIC -union TR_Result('paddr : Type, 'failure : Type) = { - TR_Address : ('paddr, ext_ptw), - TR_Failure : ('failure, ext_ptw) -} - -// This function can be ignored on first reading since TLBs are not -// part of RISC-V architecture spec (see TLB_NOTE above). -// PRIVATE: translate on TLB hit, and maintenance of PTE in TLB -function translate_TLB_hit(sv_params : SV_Params, - asid : asidbits, - ptb : bits(64), - vAddr : bits(64), - ac : AccessType(ext_access_type), - priv : Privilege, - mxr : bool, - do_sum : bool, - ext_ptw : ext_ptw, - tlb_index : nat, - ent : TLB_Entry) - -> TR_Result(bits(64), PTW_Error) = { - let pte = ent.pte; - let ext_pte = msbs_of_PTE(sv_params, pte); - let pte_flags = Mk_PTE_Flags(pte[7 .. 
0]); - let pte_check = check_PTE_permission(ac, priv, mxr, do_sum, pte_flags, - ext_pte, - ext_ptw); - match pte_check { - PTE_Check_Failure(ext_ptw, ext_ptw_fail) => - TR_Failure(ext_get_ptw_error(ext_ptw_fail), ext_ptw), - PTE_Check_Success(ext_ptw) => - match update_PTE_Bits(sv_params, pte, ac) { - None() => TR_Address(ent.pAddr | (vAddr & ent.vAddrMask), ext_ptw), - Some(pte') => - // See riscv_platform.sail - if not(plat_enable_dirty_update()) then - // pte needs dirty/accessed update but that is not enabled - TR_Failure(PTW_PTE_Update(), ext_ptw) - else { - // Writeback the PTE (which has new A/D bits) - let n_ent = {ent with pte=pte'}; - write_TLB(tlb_index, n_ent); - let pte_phys_addr = ent.pteAddr[(sizeof(xlen) - 1) .. 0]; - let mv = mem_write_value_priv(pte_phys_addr, - 8, - pte', - Supervisor, - false, - false, - false); - match mv { - MemValue(_) => (), - MemException(e) => internal_error(__FILE__, __LINE__, - "invalid physical address in TLB") - }; - TR_Address(ent.pAddr | (vAddr & ent.vAddrMask), ext_ptw) - } - } - } -} - -// PRIVATE: translate on TLB miss (do a page-table walk) -function translate_TLB_miss(sv_params : SV_Params, - asid : asidbits, - ptb : bits(64), - vAddr : bits(64), - ac : AccessType(ext_access_type), - priv : Privilege, - mxr : bool, - do_sum : bool, - ext_ptw : ext_ptw) -> TR_Result(bits(64), PTW_Error) = { - let initial_level = sv_params.levels - 1; - let ptw_result = pt_walk(sv_params, vAddr, ac, priv, mxr, do_sum, - ptb, initial_level, false, ext_ptw); - match ptw_result { - PTW_Failure(f, ext_ptw) => TR_Failure(f, ext_ptw), - PTW_Success(pAddr, pte, pteAddr, level, global, ext_ptw) => { - let ext_pte = msbs_of_PTE(sv_params, pte); - // Without TLBs, this 'match' expression can be replaced simply - // by: 'TR_Address(pAddr, ext_ptw)' (see TLB_NOTE above) - match update_PTE_Bits(sv_params, pte, ac) { - None() => { - add_to_TLB(asid, vAddr, pAddr, pte, pteAddr, level, global, - sv_params.vpn_size_bits, - pagesize_bits); - TR_Address(pAddr, ext_ptw) +function vmem_write_translate forall 'n, 0 < 'n <= max_mem_access . ( + vaddr : xlenbits, + width : int('n), + typ : AccessType(ext_access_type), + aq : bool, + rl : bool, + con : bool, +) -> result(_WriteIntermediateState('n), VMemException) = { + // Translate both addresses. + match translateRange(vaddr, width, typ) { + Err(e) => Err(e), + Ok(paddrs, ext) => { + match paddrs { + (paddr0, None()) => match mem_write_ea(paddr0, width, aq, rl, con) { + MemValue() => Ok(struct { vaddr, paddrs, width, typ, aq, rl, con }), + MemException(e) => Err(vaddr, e), + }, + (paddr0, Some(paddr1)) => { + let (width0, width1) = splitAccessWidths(paddr0, width); + assert(width0 > 0 & width1 > 0); + + match mem_write_ea(paddr0, width0, aq, rl, con) { + MemValue() => (), + MemException(e) => return Err(vaddr, e), + }; + match mem_write_ea(paddr1, width1, aq, rl, con) { + MemValue() => (), + MemException(e) => return Err(vaddr + width0, e), + }; + Ok(struct { vaddr, paddrs, width, typ, aq, rl, con }) }, - Some(pte') => - // See riscv_platform.sail - if not(plat_enable_dirty_update()) then - // pte needs dirty/accessed update but that is not enabled - TR_Failure(PTW_PTE_Update(), ext_ptw) - else { - // Writeback the PTE (which has new A/D bits) - let pte_phys_addr = pteAddr[(sizeof(xlen) - 1) .. 
0]; - let mv = mem_write_value_priv(pte_phys_addr, // pteAddr, - 8, - pte', - Supervisor, - false, - false, - false); - match mv { - MemValue(_) => { - add_to_TLB(asid, vAddr, pAddr, pte', pteAddr, level, global, - sv_params.vpn_size_bits, - pagesize_bits); - TR_Address(pAddr, ext_ptw) - }, - MemException(e) => - TR_Failure(PTW_Access(), ext_ptw) - } - } - } } - } -} - -// PRIVATE -function translate(sv_params : SV_Params, - asid : asidbits, - ptb : bits(64), - vAddr_arg : bits(64), - ac : AccessType(ext_access_type), - priv : Privilege, - mxr : bool, - do_sum : bool, - ext_ptw : ext_ptw) - -> TR_Result(bits(64), PTW_Error) = { - let va_mask : bits(64) = zero_extend(ones(sv_params.va_size_bits)); - let vAddr = (vAddr_arg & va_mask); - - // On first reading, assume lookup_TLB returns None(), since TLBs - // are not part of RISC-V archticture spec (see TLB_NOTE above) - match lookup_TLB(asid, vAddr) { - Some(index, ent) => translate_TLB_hit(sv_params, asid, ptb, vAddr, ac, priv, - mxr, do_sum, ext_ptw, index, ent), - None() => translate_TLB_miss(sv_params, asid, ptb, vAddr, ac, priv, - mxr, do_sum, ext_ptw) + }, } } -// Top-level addr-translation function -// PUBLIC: invoked from instr-fetch and load/store/amo -function translateAddr(vAddr : xlenbits, - ac : AccessType(ext_access_type)) - -> TR_Result(xlenbits, ExceptionType) = { - // Internally the vmem code works with 64-bit values, whether xlen==32 or xlen==64 - // This 'extend' is a no-op when xlen==64 and extends when xlen==32 - let vAddr_64b : bits(64) = zero_extend(vAddr); - // Effective privilege takes into account mstatus.PRV, mstatus.MPP - // See riscv_sys_regs.sail for effectivePrivilege() and cur_privilege - let effPriv : Privilege = effectivePrivilege(ac, mstatus, cur_privilege); - let mode : SATPMode = translationMode(effPriv); - let (valid_va, sv_params) : (bool, SV_Params) = match mode { - Sbare => return TR_Address(vAddr, init_ext_ptw), - Sv32 => (true, sv32_params), - Sv39 => (is_valid_vAddr(sv39_params, vAddr_64b), sv39_params), - Sv48 => (is_valid_vAddr(sv48_params, vAddr_64b), sv48_params), - // Sv57 => (is_valid_vAddr(sv57_params, vAddr_64b), sv57_params), // TODO - }; - if not(valid_va) then - TR_Failure(translationException(ac, PTW_Invalid_Addr()), init_ext_ptw) - else { - let mxr = mstatus.MXR() == 0b1; - let do_sum = mstatus.SUM() == 0b1; - let asid : asidbits = satp_to_asid(satp); - let ptb : bits(64) = satp_to_PT_base(satp); - let tr_result1 = translate(sv_params, - asid, - ptb, - vAddr_64b, - ac, effPriv, mxr, do_sum, - init_ext_ptw); - // Fixup result PA or exception - match tr_result1 { - TR_Address(pa, ext_ptw) => TR_Address(truncate(pa, sizeof(xlen)), ext_ptw), - TR_Failure(f, ext_ptw) => TR_Failure(translationException(ac, f), ext_ptw) - } +function vmem_write_value_priv_meta forall 'n, 0 < 'n <= max_mem_access . ( + state : _WriteIntermediateState('n), + value : bits(8 * 'n), + priv : Privilege, + meta : mem_meta, +) -> result(bool, VMemException) = { + let (vaddr, paddrs, width, typ, aq, rl, con) = (state.vaddr, state.paddrs, state.width, state.typ, state.aq, state.rl, state.con); + + match paddrs { + (paddr0, None()) => match checked_mem_write(paddr0, width, value, typ, priv, meta, aq, rl, con) { + MemValue(ok) => Ok(ok), + MemException(e) => Err(vaddr, e), + }, + (paddr0, Some(paddr1)) => { + let (width0, width1) = splitAccessWidths(paddr0, width); + assert(width0 > 0 & width1 > 0); + + let res0 : bool = match checked_mem_write(paddr0, width0, value[8 * width0 - 1 .. 
0], typ, priv, meta, aq, rl, con) { + MemValue(ok) => ok, + MemException(e) => return Err(vaddr, e), + }; + let res1 : bool = match checked_mem_write(paddr1, width1, value[8 * width - 1 .. 8 * width0], typ, priv, meta, aq, rl, con) { + MemValue(ok) => ok, + MemException(e) => return Err(vaddr, e), + }; + // TODO: The semantics of this return value are unclear, and currently always true. + Ok(res1 & res0) + }, } } -// **************************************************************** -// Initialize Virtual Memory state -// PUBLIC: invoked from init_model() -function init_vmem() -> unit = init_TLB() +// Convenience functions wrapping the above functions to match those in riscv_mem.sail. + +// Don't read or return meta. +function vmem_read_priv forall 'n, 0 < 'n < max_mem_access . ( + typ : AccessType(ext_access_type), + priv : Privilege, + vaddr : xlenbits, + width : int('n), + aq : bool, + rel : bool, + res : bool, +) -> result(bits(8 * 'n), VMemException) = + match vmem_read_priv_meta(typ, priv, vaddr, width, aq, rel, res, false) { + Ok(v, _) => Ok(v), + Err(vaddr, e) => Err(vaddr, e), + } -// **************************************************************** +// Use default privilege. +function vmem_read_meta forall 'n, 0 < 'n < max_mem_access . ( + typ : AccessType(ext_access_type), + vaddr : xlenbits, + width : int('n), + aq : bool, + rel : bool, + res : bool, + meta : bool, +) -> result((bits(8 * 'n), mem_meta), VMemException) = + vmem_read_priv_meta(typ, effectivePrivilege(typ, mstatus, cur_privilege), vaddr, width, aq, rel, res, meta) + +// Don't read or return meta and use default privilege. +function vmem_read forall 'n, 0 < 'n < max_mem_access . ( + typ : AccessType(ext_access_type), + vaddr : xlenbits, + width : int('n), + aq : bool, + rel : bool, + res : bool, +) -> result(bits(8 * 'n), VMemException) = + vmem_read_priv(typ, effectivePrivilege(typ, mstatus, cur_privilege), vaddr, width, aq, rel, res) + +// Write default metadata. +function vmem_write_value_priv forall 'n, 0 < 'n <= max_mem_access . ( + state : _WriteIntermediateState('n), + value : bits(8 * 'n), + priv : Privilege, +) -> result(bool, VMemException) = + vmem_write_value_priv_meta(state, value, priv, default_meta) + +// Use default privilege. +function vmem_write_value_meta forall 'n, 0 < 'n <= max_mem_access . ( + state : _WriteIntermediateState('n), + value : bits(8 * 'n), + meta : mem_meta, +) -> result(bool, VMemException) = + vmem_write_value_priv_meta(state, value, effectivePrivilege(state.typ, mstatus, cur_privilege), meta) + +// Write default metadata and use default privilege. +function vmem_write_value forall 'n, 0 < 'n <= max_mem_access . ( + state : _WriteIntermediateState('n), + value : bits(8 * 'n), +) -> result(bool, VMemException) = + vmem_write_value_meta(state, value, default_meta) diff --git a/model/riscv_vmem_translate.sail b/model/riscv_vmem_translate.sail new file mode 100644 index 000000000..eca755865 --- /dev/null +++ b/model/riscv_vmem_translate.sail @@ -0,0 +1,452 @@ +/*=======================================================================================*/ +/* This Sail RISC-V architecture model, comprising all files and */ +/* directories except where otherwise noted is subject the BSD */ +/* two-clause license in the LICENSE file. 
*/
+/* */
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*=======================================================================================*/
+
+// ****************************************************************
+// Virtual memory address translation and memory protection,
+// including PTWs (Page Table Walks) and TLBs (Translation Look-aside Buffers)
+// Supported VM modes: Sv32, Sv39, Sv48. TODO: Sv57
+
+// STYLE NOTES:
+// PRIVATE items are used only within this VM code.
+// PUBLIC items are invoked from other parts of sail-riscv.
+
+// TLB NOTE:
+// TLBs are not part of the RISC-V architecture specification.
+// However, we model a simple TLB so that
+// (1) we can meaningfully test SFENCE.VMA which is a no-op without TLBs;
+// (2) we can greatly speed up simulation
+// (e.g., from 10s of minutes to a few minutes for Linux boot)
+// The TLB implementation is in a separate file: riscv_vmem_tlb.sail
+// The code in this file is structured and commented so you can easily
+// ignore TLB functionality at first reading.
+
+// ****************************************************************
+// Fields of virtual addresses
+
+// PRIVATE: Extract full VPN field from VA
+function vpns_of_va(sv_params : SV_Params,
+ va : bits(64)) -> bits(64) = {
+ let mask : bits(64) = zero_extend(ones(sv_params.va_size_bits));
+ (va & mask) >> pagesize_bits
+}
+
+// PRIVATE: Extract VPN[level] from VA
+function vpn_j_of_va(sv_params : SV_Params,
+ va : bits(64),
+ level : PTW_Level) -> bits(64) = {
+ let lsb : range(0,63) = pagesize_bits + level * sv_params.vpn_size_bits;
+ assert (lsb < sizeof(xlen));
+ let mask : bits(64) = zero_extend(ones(sv_params.vpn_size_bits));
+ ((va >> lsb) & mask)
+}
+
+// PRIVATE: Extract offset within page from VA
+function offset_of_va(va : bits(64)) -> bits(PAGESIZE_BITS) = va[pagesize_bits - 1 .. 0]
+
+// Valid xlen-wide values containing virtual addrs must have upper
+// bits equal to the MSB of the virtual address.
+// Virtual address widths depend on the virtual memory mode.
+// PRIVATE
+function is_valid_vAddr(struct { va_size_bits, _ } : SV_Params,
+ vAddr : bits(64)) -> bool =
+ vAddr == sign_extend(vAddr[va_size_bits - 1 .. 0])
+
+// ****************************************************************
+// Results of Page Table Walk (PTW)
+
+// PRIVATE
+union PTW_Result = {
+ PTW_Success: (bits(64), bits(64), bits(64), nat, bool, ext_ptw),
+ PTW_Failure: (PTW_Error, ext_ptw)
+}
+
+// ****************************************************************
+// Page Table Walk (PTW)
+
+// Note: 'pt_walk()' is recursive => needs separate 'val' decls
+
+// PRIVATE
+val pt_walk : (SV_Params,
+ bits(64), // virtual addr
+ AccessType(ext_access_type), // Read/Write/ReadWrite/Execute
+ Privilege, // User/Supervisor/Machine
+ bool, // mstatus.MXR
+ bool, // do_sum
+ bits(64), // PT base addr
+ PTW_Level, // tree level for this recursive call
+ bool, // global translation,
+ ext_ptw) // ext_ptw
+ -> PTW_Result
+
+function pt_walk(sv_params,
+ va,
+ ac,
+ priv,
+ mxr,
+ do_sum,
+ pt_base,
+ level,
+ global,
+ ext_ptw) = {
+ let vpn_j = vpn_j_of_va(sv_params, va, level);
+ let pte_offset = vpn_j << sv_params.log_pte_size_bytes;
+ let pte_addr = pt_base + pte_offset;
+
+ // In Sv32, physical addrs are actually 34 bits, not XLEN(=32) bits.
+ // Below, 'pte_phys_addr' is XLEN bits because it's an arg to
+ // 'mem_read_priv()' [riscv_mem.sail] where it's declared as xlenbits.
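+ // (An Sv32 PA is a 22-bit PPN plus a 12-bit page offset, i.e. 34 bits,
+ // so the 32-bit slice below can drop the top two bits.)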
+ // That def and this use need to be fixed together (TODO) + let pte_phys_addr : xlenbits = pte_addr[(sizeof(xlen) - 1) .. 0]; + + // Read this-level PTE from mem + let mem_result = mem_read_priv(Read(Data), // AccessType + Supervisor, // Privilege + pte_phys_addr, + 8, // atom (8) + false, // aq + false, // rl + false); // res + + match mem_result { + MemException(_) => PTW_Failure(PTW_Access(), ext_ptw), + MemValue(pte) => { + let pte_flags = Mk_PTE_Flags(pte[7 .. 0]); + if pte_is_invalid(pte_flags) then + PTW_Failure(PTW_Invalid_PTE(), ext_ptw) + else { + let ppns : bits(64) = PPNs_of_PTE(sv_params, pte); + let global' = global | (pte_flags[G] == 0b1); + if pte_is_ptr(pte_flags) then { + // Non-Leaf PTE + if level > 0 then { + // follow the pointer to walk next level + let pt_base' : bits(64) = ppns << pagesize_bits; + let level' = level - 1; + pt_walk(sv_params, va, ac, priv, mxr, do_sum, + pt_base', level', global', ext_ptw) + } + else + // level 0 PTE, but contains a pointer instead of a leaf + PTW_Failure(PTW_Invalid_PTE(), ext_ptw) + } + else { + // Leaf PTE + let ext_pte = msbs_of_PTE(sv_params, pte); + let pte_check = check_PTE_permission(ac, priv, mxr, do_sum, pte_flags, + ext_pte, ext_ptw); + match pte_check { + PTE_Check_Failure(ext_ptw, ext_ptw_fail) => + PTW_Failure(ext_get_ptw_error(ext_ptw_fail), ext_ptw), + PTE_Check_Success(ext_ptw) => + if level > 0 then { + // Superpage; construct mask for lower-level PPNs from the PTE + let mask_bits = level * sv_params.pte_PPN_j_size_bits; + // Clear the lowest `mask_bits` bits. + let ppns_masked = (ppns >> mask_bits) << mask_bits; + if not(ppns == ppns_masked) then + // misaligned superpage mapping + PTW_Failure(PTW_Misaligned(), ext_ptw) + else { + // Compose final PA in superpage: + // Superpage PPN + lower VPNs + pagesize_bits page-offset + let mask : bits(64) = ~ (ones() << mask_bits); + let ppn = ppns | (vpns_of_va(sv_params, va) & mask); + let pa = (ppn << pagesize_bits) | zero_extend(offset_of_va(va)); + PTW_Success(pa, pte, pte_addr, level, global', ext_ptw) + } + } + else { + let pa = (ppns << pagesize_bits) | zero_extend(offset_of_va(va)); + PTW_Success(pa, pte, pte_addr, level, global', ext_ptw) + } + } + } + } + } + } +} + +// **************************************************************** +// Architectural SATP CSR + +// PUBLIC: see also riscv_insts_zicsr.sail and other CSR-related files +register satp : xlenbits + +// See riscv_sys_regs.sail for legalize_satp{32,64}(). +// WARNING: those functions legalize Mode but not ASID? +// PUBLIC: invoked from writeCSR() to fixup WARL fields +function legalize_satp(a : Architecture, + o : xlenbits, // previous value of satp + v : xlenbits) // proposed new value of satp + -> xlenbits = { // new legal value of satp + if sizeof(xlen) == 32 then { + // The slice and extend ops below are no-ops when xlen==32, + // but appease the type-checker when xlen==64 (when this code is not executed!) + let o32 : bits(32) = o[31 .. 0]; + let v32 : bits(32) = v[31 .. 0]; + let new_satp : bits(32) = legalize_satp32(a, o32, v32); + zero_extend(new_satp); + } else if sizeof(xlen) == 64 then { + // The extend and truncate ops below are no-ops when xlen==64, + // but appease the type-checker when xlen==32 (when this code is not executed!) 
+ let o64 : bits(64) = zero_extend(o); + let v64 : bits(64) = zero_extend(v); + let new_satp : bits(64) = legalize_satp64(a, o64, v64); + truncate(new_satp, sizeof(xlen)) + } else + internal_error(__FILE__, __LINE__, "Unsupported xlen" ^ dec_str(sizeof(xlen))) +} + +// ---------------- +// Fields of SATP + +// ASID is 9b in Sv32, 16b in Sv39/Sv48/Sv57: we use 16b for both +// PRIVATE +function satp_to_asid(satp_val : xlenbits) -> asidbits = + if sizeof(xlen) == 32 then zero_extend(Mk_Satp32(satp_val)[Asid]) + else if sizeof(xlen) == 64 then Mk_Satp64(satp_val)[Asid] + else internal_error(__FILE__, __LINE__, + "Unsupported xlen" ^ dec_str(sizeof(xlen))) + +// Result is 64b to cover both RV32 and RV64 addrs +// PRIVATE +function satp_to_PT_base(satp_val : xlenbits) -> bits(64) = { + let ppn = if sizeof(xlen) == 32 then zero_extend (64, Mk_Satp32(satp_val)[PPN]) + else if sizeof(xlen) == 64 then zero_extend (64, Mk_Satp64(satp_val)[PPN]) + else internal_error(__FILE__, __LINE__, + "Unsupported xlen" ^ dec_str(sizeof(xlen))); + ppn << pagesize_bits +} + +// Compute address translation mode from SATP register +// PRIVATE +function translationMode(priv : Privilege) -> SATPMode = { + if priv == Machine then + Sbare + else if sizeof(xlen) == 32 then + match Mk_Satp32(satp)[Mode] { + 0b0 => Sbare, + 0b1 => Sv32 + } + else if sizeof(xlen) == 64 then { + // Translation mode is based on mstatus.SXL, which could be RV32 when xlen==64 + let arch = architecture(get_mstatus_SXL(mstatus)); + match arch { + Some(RV64) => { let mbits : bits(4) = satp[63 .. 60]; + match satp64Mode_of_bits(RV64, mbits) { // see riscv_types.sail + Some(m) => m, + None() => internal_error(__FILE__, __LINE__, + "invalid RV64 translation mode in satp") + } + }, + Some(RV32) => match Mk_Satp32(satp[31 .. 0])[Mode] { // Note: satp is 64bits here + // When xlen is 64, mstatus.SXL (for S privilege) can be RV32 + 0b0 => Sbare, + 0b1 => Sv32 + }, + _ => internal_error(__FILE__, __LINE__, "unsupported address translation arch") + } + } + else + internal_error(__FILE__, __LINE__, "unsupported xlen") +} + +// **************************************************************** +// VA to PA translation + +// Result of address translation + +// PUBLIC +union TR_Result('paddr : Type, 'failure : Type) = { + TR_Address : ('paddr, ext_ptw), + TR_Failure : 'failure, +} + +// This function can be ignored on first reading since TLBs are not +// part of RISC-V architecture spec (see TLB_NOTE above). +// PRIVATE: translate on TLB hit, and maintenance of PTE in TLB +function translate_TLB_hit(sv_params : SV_Params, + asid : asidbits, + ptb : bits(64), + vAddr : bits(64), + ac : AccessType(ext_access_type), + priv : Privilege, + mxr : bool, + do_sum : bool, + ext_ptw : ext_ptw, + tlb_index : nat, + ent : TLB_Entry) + -> TR_Result(bits(64), PTW_Error) = { + let pte = ent.pte; + let ext_pte = msbs_of_PTE(sv_params, pte); + let pte_flags = Mk_PTE_Flags(pte[7 .. 
0]); + let pte_check = check_PTE_permission(ac, priv, mxr, do_sum, pte_flags, + ext_pte, + ext_ptw); + match pte_check { + PTE_Check_Failure(ext_ptw, ext_ptw_fail) => + TR_Failure(ext_get_ptw_error(ext_ptw_fail)), + PTE_Check_Success(ext_ptw) => + match update_PTE_Bits(sv_params, pte, ac) { + None() => TR_Address(ent.pAddr | (vAddr & ent.vAddrMask), ext_ptw), + Some(pte') => + // See riscv_platform.sail + if not(plat_enable_dirty_update()) then + // pte needs dirty/accessed update but that is not enabled + TR_Failure(PTW_PTE_Update()) + else { + // Writeback the PTE (which has new A/D bits) + let n_ent = {ent with pte=pte'}; + write_TLB(tlb_index, n_ent); + let pte_phys_addr = ent.pteAddr[(sizeof(xlen) - 1) .. 0]; + let mv = mem_write_value_priv(pte_phys_addr, + 8, + pte', + Supervisor, + false, + false, + false); + match mv { + MemValue(_) => (), + MemException(e) => internal_error(__FILE__, __LINE__, + "invalid physical address in TLB") + }; + TR_Address(ent.pAddr | (vAddr & ent.vAddrMask), ext_ptw) + } + } + } +} + +// PRIVATE: translate on TLB miss (do a page-table walk) +function translate_TLB_miss(sv_params : SV_Params, + asid : asidbits, + ptb : bits(64), + vAddr : bits(64), + ac : AccessType(ext_access_type), + priv : Privilege, + mxr : bool, + do_sum : bool, + ext_ptw : ext_ptw) -> TR_Result(bits(64), PTW_Error) = { + let initial_level = sv_params.levels - 1; + let ptw_result = pt_walk(sv_params, vAddr, ac, priv, mxr, do_sum, + ptb, initial_level, false, ext_ptw); + match ptw_result { + PTW_Failure(f, ext_ptw) => TR_Failure(f), + PTW_Success(pAddr, pte, pteAddr, level, global, ext_ptw) => { + let ext_pte = msbs_of_PTE(sv_params, pte); + // Without TLBs, this 'match' expression can be replaced simply + // by: 'TR_Address(pAddr, ext_ptw)' (see TLB_NOTE above) + match update_PTE_Bits(sv_params, pte, ac) { + None() => { + add_to_TLB(asid, vAddr, pAddr, pte, pteAddr, level, global, + sv_params.vpn_size_bits, + pagesize_bits); + TR_Address(pAddr, ext_ptw) + }, + Some(pte') => + // See riscv_platform.sail + if not(plat_enable_dirty_update()) then + // pte needs dirty/accessed update but that is not enabled + TR_Failure(PTW_PTE_Update()) + else { + // Writeback the PTE (which has new A/D bits) + let pte_phys_addr = pteAddr[(sizeof(xlen) - 1) .. 
0];
+ let mv = mem_write_value_priv(pte_phys_addr, // pteAddr,
+ 8,
+ pte',
+ Supervisor,
+ false,
+ false,
+ false);
+ match mv {
+ MemValue(_) => {
+ add_to_TLB(asid, vAddr, pAddr, pte', pteAddr, level, global,
+ sv_params.vpn_size_bits,
+ pagesize_bits);
+ TR_Address(pAddr, ext_ptw)
+ },
+ MemException(e) =>
+ TR_Failure(PTW_Access())
+ }
+ }
+ }
+ }
+ }
+}
+
+// PRIVATE
+function translate(sv_params : SV_Params,
+ asid : asidbits,
+ ptb : bits(64),
+ vAddr_arg : bits(64),
+ ac : AccessType(ext_access_type),
+ priv : Privilege,
+ mxr : bool,
+ do_sum : bool,
+ ext_ptw : ext_ptw)
+ -> TR_Result(bits(64), PTW_Error) = {
+ let va_mask : bits(64) = zero_extend(ones(sv_params.va_size_bits));
+ let vAddr = (vAddr_arg & va_mask);
+
+ // On first reading, assume lookup_TLB returns None(), since TLBs
+ // are not part of the RISC-V architecture spec (see TLB_NOTE above)
+ match lookup_TLB(asid, vAddr) {
+ Some(index, ent) => translate_TLB_hit(sv_params, asid, ptb, vAddr, ac, priv,
+ mxr, do_sum, ext_ptw, index, ent),
+ None() => translate_TLB_miss(sv_params, asid, ptb, vAddr, ac, priv,
+ mxr, do_sum, ext_ptw)
+ }
+}
+
+// Top-level addr-translation function
+// PUBLIC: invoked from instr-fetch and load/store/amo
+function translateAddr(vAddr : xlenbits,
+ ac : AccessType(ext_access_type))
+ -> TR_Result(xlenbits, ExceptionType) = {
+ // Internally the vmem code works with 64-bit values, whether xlen==32 or xlen==64
+ // This 'extend' is a no-op when xlen==64 and extends when xlen==32
+ let vAddr_64b : bits(64) = zero_extend(vAddr);
+ // Effective privilege takes into account mstatus.PRV, mstatus.MPP
+ // See riscv_sys_regs.sail for effectivePrivilege() and cur_privilege
+ let effPriv : Privilege = effectivePrivilege(ac, mstatus, cur_privilege);
+ let mode : SATPMode = translationMode(effPriv);
+ let (valid_va, sv_params) : (bool, SV_Params) = match mode {
+ Sbare => return TR_Address(vAddr, init_ext_ptw),
+ Sv32 => (true, sv32_params),
+ Sv39 => (is_valid_vAddr(sv39_params, vAddr_64b), sv39_params),
+ Sv48 => (is_valid_vAddr(sv48_params, vAddr_64b), sv48_params),
+ // Sv57 => (is_valid_vAddr(sv57_params, vAddr_64b), sv57_params), // TODO
+ };
+ if not(valid_va) then
+ TR_Failure(translationException(ac, PTW_Invalid_Addr()))
+ else {
+ let mxr = mstatus.MXR() == 0b1;
+ let do_sum = mstatus.SUM() == 0b1;
+ let asid : asidbits = satp_to_asid(satp);
+ let ptb : bits(64) = satp_to_PT_base(satp);
+ let tr_result1 = translate(sv_params,
+ asid,
+ ptb,
+ vAddr_64b,
+ ac, effPriv, mxr, do_sum,
+ init_ext_ptw);
+ // Fixup result PA or exception
+ match tr_result1 {
+ TR_Address(pa, ext_ptw) => TR_Address(truncate(pa, sizeof(xlen)), ext_ptw),
+ TR_Failure(f) => TR_Failure(translationException(ac, f))
+ }
+ }
+}
+
+// ****************************************************************
+// Initialize Virtual Memory state
+
+// PUBLIC: invoked from init_model()
+function init_vmem() -> unit = init_TLB()
+
+// ****************************************************************
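+
+// Usage sketch (informal, not part of the model): callers treat translateAddr
+// as a black box, e.g.
+//
+//   match translateAddr(vaddr, Read(Data)) {
+//     TR_Address(paddr, ext_ptw) => ... access physical memory at paddr ...,
+//     TR_Failure(e) => ... raise exception e for vaddr ...,
+//   }
+//
+// riscv_vmem.sail builds the vmem_read*/vmem_write* helpers on top of this,
+// splitting an access that crosses a page boundary into two translations.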