workspace: enable unsafe_op_in_unsafe_fn lint
It is good to mark functions as `unsafe` when the safety of their
behavior is not guaranteed. It is not good to permit these functions to
perform additional unsafe operations without warning. All uses of
unsafe code should require an explicit `unsafe` annotation, regardless
of whether they appear in safe or unsafe functions.

Signed-off-by: Jon Lange <[email protected]>
msft-jlange committed Nov 16, 2024
1 parent d60a5c5 commit 01f144a
Showing 19 changed files with 130 additions and 67 deletions.
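
To make the effect of the new lint concrete, here is a minimal, self-contained sketch that is not part of the commit itself (the `zero_region` function and its caller are hypothetical): with `unsafe_op_in_unsafe_fn` set to `deny`, the body of an `unsafe fn` no longer acts as one implicit `unsafe` block, so each unsafe operation inside it must be wrapped explicitly, just as it would be in a safe function.

    // Illustrative only: `zero_region` is a made-up example, not code from this
    // repository. The crate-level attribute mirrors the workspace lint setting.
    #![deny(unsafe_op_in_unsafe_fn)]

    /// # Safety
    /// `ptr` must be valid for writes of `len` bytes.
    unsafe fn zero_region(ptr: *mut u8, len: usize) {
        // A bare `ptr.write_bytes(0, len)` would have compiled before this lint;
        // with the lint denied it is a hard error, and the operation must be
        // wrapped in an explicit `unsafe` block even inside an `unsafe fn`.
        unsafe {
            ptr.write_bytes(0, len);
        }
    }

    fn main() {
        let mut buf = [0xffu8; 16];
        // Calling an `unsafe fn` still needs an `unsafe` block at the call site.
        unsafe { zero_region(buf.as_mut_ptr(), buf.len()) };
        assert!(buf.iter().all(|&b| b == 0));
    }

Because `unsafe_op_in_unsafe_fn` is a rustc lint rather than a Clippy lint, any remaining violations in the workspace surface during an ordinary `cargo check` or `cargo build`.
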
1 change: 1 addition & 0 deletions Cargo.toml
@@ -57,6 +57,7 @@ rust_2018_idioms = { level = "deny", priority = 1 }
missing_debug_implementations = { level = "deny", priority = 50 }
single_use_lifetimes = { level = "warn", priority = 125 }
trivial-numeric-casts = { level = "deny", priority = 10 }
unsafe_op_in_unsafe_fn = { level = "deny", priority = 2 }

[workspace.lints.clippy]
await_holding_lock = "warn"
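
Note (not part of the diff): a `[workspace.lints.rust]` entry like the one above only reaches member crates that opt in with `lints.workspace = true` in their own Cargo.toml, which the crates in this workspace are assumed to do already.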
42 changes: 26 additions & 16 deletions fuzz/fuzz_targets/alloc.rs
@@ -51,41 +51,51 @@ impl PoisonAllocator {
}

unsafe fn unpoison_mem(&self, ptr: *mut u8, size: usize) {
ptr.write_bytes(Self::WRITE_BYTE, size);
unsafe {
ptr.write_bytes(Self::WRITE_BYTE, size);
}
}

unsafe fn poison_mem(&self, ptr: *mut u8, size: usize) {
ptr.write_bytes(Self::POISON_BYTE, size);
unsafe {
ptr.write_bytes(Self::POISON_BYTE, size);
}
}

unsafe fn check_mem(&self, ptr: *mut u8, size: usize) {
for i in 0..size {
assert_eq!(ptr.add(i).read_volatile(), Self::WRITE_BYTE);
assert_eq!(unsafe { ptr.add(i).read_volatile() }, Self::WRITE_BYTE);
}
}

unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
let ptr = self.heap.alloc(layout);
if !ptr.is_null() {
self.unpoison_mem(ptr, layout.size());
unsafe {
let ptr = self.heap.alloc(layout);
if !ptr.is_null() {
self.unpoison_mem(ptr, layout.size());
}
ptr
}
ptr
}

unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
self.check_mem(ptr, layout.size());
self.poison_mem(ptr, layout.size());
self.heap.dealloc(ptr, layout);
unsafe {
self.check_mem(ptr, layout.size());
self.poison_mem(ptr, layout.size());
self.heap.dealloc(ptr, layout);
}
}

unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_layout: Layout) -> *mut u8 {
self.check_mem(ptr, layout.size());
self.poison_mem(ptr, layout.size());
let ptr = self.heap.realloc(ptr, layout, new_layout.size());
if !ptr.is_null() {
self.unpoison_mem(ptr, new_layout.size());
unsafe {
self.check_mem(ptr, layout.size());
self.poison_mem(ptr, layout.size());
let ptr = self.heap.realloc(ptr, layout, new_layout.size());
if !ptr.is_null() {
self.unpoison_mem(ptr, new_layout.size());
}
ptr
}
ptr
}
}

2 changes: 1 addition & 1 deletion fuzz/fuzz_targets/page_alloc.rs
@@ -68,7 +68,7 @@ fn get_item<T>(v: &[T], idx: usize) -> Option<&T> {

#[inline]
unsafe fn fill_page(page: VirtAddr, byte: u8) {
page.as_mut_ptr::<u8>().write_bytes(byte, PAGE_SIZE)
unsafe { page.as_mut_ptr::<u8>().write_bytes(byte, PAGE_SIZE) }
}

#[inline]
6 changes: 3 additions & 3 deletions kernel/src/address.rs
@@ -242,7 +242,7 @@ impl VirtAddr {
#[inline]
pub unsafe fn aligned_ref<'a, T>(&self) -> Option<&'a T> {
self.is_aligned_to::<T>()
.then(|| self.as_ptr::<T>().as_ref())
.then(|| unsafe { self.as_ptr::<T>().as_ref() })
.flatten()
}

@@ -256,7 +256,7 @@ impl VirtAddr {
#[inline]
pub unsafe fn aligned_mut<'a, T>(&self) -> Option<&'a mut T> {
self.is_aligned_to::<T>()
.then(|| self.as_mut_ptr::<T>().as_mut())
.then(|| unsafe { self.as_mut_ptr::<T>().as_mut() })
.flatten()
}

@@ -279,7 +279,7 @@ impl VirtAddr {
/// All Safety requirements from [`core::slice::from_raw_parts`] for the
/// data pointed to by the `VirtAddr` apply here as well.
pub unsafe fn to_slice<T>(&self, len: usize) -> &[T] {
slice::from_raw_parts::<T>(self.as_ptr::<T>(), len)
unsafe { slice::from_raw_parts::<T>(self.as_ptr::<T>(), len) }
}
}

10 changes: 7 additions & 3 deletions kernel/src/cpu/gdt.rs
@@ -79,12 +79,16 @@ impl GDT {

let tss_entries = &self.entries[idx..idx + 1].as_mut_ptr();

tss_entries.add(0).write_volatile(desc0);
tss_entries.add(1).write_volatile(desc1);
unsafe {
tss_entries.add(0).write_volatile(desc0);
tss_entries.add(1).write_volatile(desc1);
}
}

unsafe fn clear_tss_entry(&mut self) {
self.set_tss_entry(GDTEntry::null(), GDTEntry::null());
unsafe {
self.set_tss_entry(GDTEntry::null(), GDTEntry::null());
}
}

pub fn load_tss(&mut self, tss: &X86Tss) {
16 changes: 12 additions & 4 deletions kernel/src/cpu/irq_state.rs
@@ -20,7 +20,9 @@ const EFLAGS_IF: u64 = 1 << 9;
/// Callers need to take care of re-enabling IRQs.
#[inline(always)]
pub unsafe fn raw_irqs_disable() {
asm!("cli", options(att_syntax, preserves_flags, nomem));
unsafe {
asm!("cli", options(att_syntax, preserves_flags, nomem));
}
}

/// Unconditionally enable IRQs
Expand All @@ -32,7 +34,9 @@ pub unsafe fn raw_irqs_disable() {
/// have been enabled.
#[inline(always)]
pub unsafe fn raw_irqs_enable() {
asm!("sti", options(att_syntax, preserves_flags, nomem));
unsafe {
asm!("sti", options(att_syntax, preserves_flags, nomem));
}

// Now that interrupts are enabled, process any #HV events that may be
// pending.
@@ -111,7 +115,9 @@ impl IrqState {
pub unsafe fn disable(&self) {
let state = irqs_enabled();

raw_irqs_disable();
unsafe {
raw_irqs_disable();
}
let val = self.count.fetch_add(1, Ordering::Relaxed);

assert!(val >= 0);
@@ -139,7 +145,9 @@ impl IrqState {
if val == 1 {
let state = self.state.load(Ordering::Relaxed);
if state {
raw_irqs_enable();
unsafe {
raw_irqs_enable();
}
}
}
}
18 changes: 13 additions & 5 deletions kernel/src/cpu/percpu.rs
@@ -91,7 +91,7 @@ impl PerCpuAreas {
}

unsafe fn push(&self, info: PerCpuInfo) {
let ptr = self.areas.get().as_mut().unwrap();
let ptr = unsafe { self.areas.get().as_mut().unwrap() };
ptr.push(info);
}

@@ -383,7 +383,9 @@ impl PerCpu {
/// `enable()` call.
#[inline(always)]
pub unsafe fn irqs_disable(&self) {
self.irq_state.disable();
unsafe {
self.irq_state.disable();
}
}

/// Reduces IRQ-disable nesting level on the current CPU and restores the
@@ -395,7 +397,9 @@ impl PerCpu {
/// `enable()` call.
#[inline(always)]
pub unsafe fn irqs_enable(&self) {
self.irq_state.enable();
unsafe {
self.irq_state.enable();
}
}

/// Get IRQ-disable nesting count on the current CPU
@@ -982,7 +986,9 @@ pub fn this_cpu_shared() -> &'static PerCpuShared {
/// `irqs_enable()` call.
#[inline(always)]
pub unsafe fn irqs_disable() {
this_cpu().irqs_disable();
unsafe {
this_cpu().irqs_disable();
}
}

/// Reduces IRQ-disable nesting level on the current CPU and restores the
@@ -994,7 +1000,9 @@ pub unsafe fn irqs_disable() {
/// `irqs_enable()` call.
#[inline(always)]
pub unsafe fn irqs_enable() {
this_cpu().irqs_enable();
unsafe {
this_cpu().irqs_enable();
}
}

/// Get IRQ-disable nesting count on the current CPU
8 changes: 6 additions & 2 deletions kernel/src/cpu/sse.rs
@@ -79,14 +79,16 @@ pub fn sse_init() {
/// no other part of the code is accessing this memory at the same time.
pub unsafe fn sse_save_context(addr: u64) {
let save_bits = XCR0_X87_ENABLE | XCR0_SSE_ENABLE | XCR0_YMM_ENABLE;
asm!(
unsafe {
asm!(
r#"
xsaveopt (%rsi)
"#,
in("rsi") addr,
in("rax") save_bits,
in("rdx") 0,
options(att_syntax));
}
}

/// # Safety
@@ -95,12 +97,14 @@ pub unsafe fn sse_save_context(addr: u64) {
/// no other part of the code is accessing this memory at the same time.
pub unsafe fn sse_restore_context(addr: u64) {
let save_bits = XCR0_X87_ENABLE | XCR0_SSE_ENABLE | XCR0_YMM_ENABLE;
asm!(
unsafe {
asm!(
r#"
xrstor (%rsi)
"#,
in("rsi") addr,
in("rax") save_bits,
in("rdx") 0,
options(att_syntax));
}
}
6 changes: 4 additions & 2 deletions kernel/src/insn_decode/insn.rs
@@ -334,11 +334,13 @@ pub mod test_utils {
type Item = T;

unsafe fn mem_read(&self) -> Result<Self::Item, InsnError> {
Ok(*(self.ptr))
Ok(unsafe { *(self.ptr) })
}

unsafe fn mem_write(&mut self, data: Self::Item) -> Result<(), InsnError> {
*(self.ptr) = data;
unsafe {
*(self.ptr) = data;
}
Ok(())
}
}
4 changes: 2 additions & 2 deletions kernel/src/mm/alloc.rs
@@ -426,7 +426,7 @@ impl MemoryRegion {
/// undefined, as the compiler is allowed to optimize assuming there will
/// be no arithmetic overflows.
unsafe fn page_info_mut_ptr(&mut self, pfn: usize) -> *mut PageStorageType {
self.start_virt.as_mut_ptr::<PageStorageType>().add(pfn)
unsafe { self.start_virt.as_mut_ptr::<PageStorageType>().add(pfn) }
}

/// Gets a pointer to the page information for a given page frame number.
@@ -437,7 +437,7 @@ impl MemoryRegion {
/// undefined, as the compiler is allowed to optimize assuming there will
/// be no arithmetic overflows.
unsafe fn page_info_ptr(&self, pfn: usize) -> *const PageStorageType {
self.start_virt.as_ptr::<PageStorageType>().add(pfn)
unsafe { self.start_virt.as_ptr::<PageStorageType>().add(pfn) }
}

/// Checks if a page frame number is valid.
21 changes: 14 additions & 7 deletions kernel/src/mm/guestmem.rs
@@ -90,7 +90,8 @@ unsafe fn read_u16(v: VirtAddr) -> Result<u16, SvsmError> {
let mut rcx: u64;
let mut val: u64;

asm!("1: movw ({0}), {1}",
unsafe {
asm!("1: movw ({0}), {1}",
" xorq %rcx, %rcx",
"2:",
".pushsection \"__exception_table\",\"a\"",
@@ -102,6 +103,7 @@ unsafe fn read_u16(v: VirtAddr) -> Result<u16, SvsmError> {
out(reg) val,
out("rcx") rcx,
options(att_syntax, nostack));
}

let ret: u16 = (val & 0xffff) as u16;
if rcx == 0 {
@@ -117,7 +119,8 @@ unsafe fn read_u32(v: VirtAddr) -> Result<u32, SvsmError> {
let mut rcx: u64;
let mut val: u64;

asm!("1: movl ({0}), {1}",
unsafe {
asm!("1: movl ({0}), {1}",
" xorq %rcx, %rcx",
"2:",
".pushsection \"__exception_table\",\"a\"",
@@ -129,6 +132,7 @@ unsafe fn read_u32(v: VirtAddr) -> Result<u32, SvsmError> {
out(reg) val,
out("rcx") rcx,
options(att_syntax, nostack));
}

let ret: u32 = (val & 0xffffffff) as u32;
if rcx == 0 {
@@ -144,7 +148,8 @@ unsafe fn read_u64(v: VirtAddr) -> Result<u64, SvsmError> {
let mut rcx: u64;
let mut val: u64;

asm!("1: movq ({0}), {1}",
unsafe {
asm!("1: movq ({0}), {1}",
" xorq %rcx, %rcx",
"2:",
".pushsection \"__exception_table\",\"a\"",
@@ -156,7 +161,7 @@ unsafe fn read_u64(v: VirtAddr) -> Result<u64, SvsmError> {
out(reg) val,
out("rcx") rcx,
options(att_syntax, nostack));

}
if rcx == 0 {
Ok(val)
} else {
@@ -169,7 +174,8 @@ unsafe fn do_movsb<T>(src: *const T, dst: *mut T) -> Result<(), SvsmError> {
let size: usize = size_of::<T>();
let mut rcx: u64;

asm!("1:cld
unsafe {
asm!("1:cld
rep movsb
2:
.pushsection \"__exception_table\",\"a\"
@@ -181,6 +187,7 @@ unsafe fn do_movsb<T>(src: *const T, dst: *mut T) -> Result<(), SvsmError> {
inout("rdi") dst => _,
inout("rcx") size => rcx,
options(att_syntax, nostack));
}

if rcx == 0 {
Ok(())
@@ -269,12 +276,12 @@ impl<T: Copy> InsnMachineMem for GuestPtr<T> {

/// Safety: See the GuestPtr's read() method documentation for safety requirements.
unsafe fn mem_read(&self) -> Result<Self::Item, InsnError> {
self.read().map_err(|_| InsnError::MemRead)
unsafe { self.read().map_err(|_| InsnError::MemRead) }
}

/// Safety: See the GuestPtr's write() method documentation for safety requirements.
unsafe fn mem_write(&mut self, data: Self::Item) -> Result<(), InsnError> {
self.write(data).map_err(|_| InsnError::MemWrite)
unsafe { self.write(data).map_err(|_| InsnError::MemWrite) }
}
}
