Make some linux/unix APIs better conform to strict provenance.

This largely makes the stdlib conform to strict provenance on Ubuntu.
Some hairier things have been left alone for now.
This commit is contained in:
Alexis Beingessner 2022-03-22 21:29:38 -04:00 committed by Aria Beingessner
parent 68643603ad
commit 09395f626b
5 changed files with 39 additions and 36 deletions

View File

@@ -493,7 +493,7 @@ impl RawFrame {
match self {
RawFrame::Actual(frame) => frame.ip(),
#[cfg(test)]
RawFrame::Fake => ptr::invalid_mut(1),
RawFrame::Fake => crate::ptr::invalid_mut(1),
}
}
}

View File

@@ -17,8 +17,8 @@ mod libc {
fn sun_path_offset(addr: &libc::sockaddr_un) -> usize {
// Work with an actual instance of the type since using a null pointer is UB
let base = addr as *const _ as usize;
let path = &addr.sun_path as *const _ as usize;
let base = (addr as *const libc::sockaddr_un).addr();
let path = (&addr.sun_path as *const i8).addr();
path - base
}

View File

@@ -9,7 +9,7 @@ pub fn memchr(needle: u8, haystack: &[u8]) -> Option<usize> {
haystack.len(),
)
};
if p.is_null() { None } else { Some(p as usize - (haystack.as_ptr() as usize)) }
if p.is_null() { None } else { Some(p.addr() - haystack.as_ptr().addr()) }
}
pub fn memrchr(needle: u8, haystack: &[u8]) -> Option<usize> {
@@ -26,7 +26,7 @@ pub fn memrchr(needle: u8, haystack: &[u8]) -> Option<usize> {
haystack.len(),
)
};
if p.is_null() { None } else { Some(p as usize - (haystack.as_ptr() as usize)) }
if p.is_null() { None } else { Some(p.addr() - haystack.as_ptr().addr()) }
}
#[cfg(not(target_os = "linux"))]

View File

@@ -505,9 +505,8 @@ pub mod guard {
#[cfg(target_os = "macos")]
unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
let th = libc::pthread_self();
let stackaddr =
libc::pthread_get_stackaddr_np(th) as usize - libc::pthread_get_stacksize_np(th);
Some(stackaddr as *mut libc::c_void)
let stackptr = libc::pthread_get_stackaddr_np(th);
Some(stackptr.map_addr(|addr| addr - libc::pthread_get_stacksize_np(th)))
}
#[cfg(target_os = "openbsd")]
@@ -515,14 +514,15 @@ pub mod guard {
let mut current_stack: libc::stack_t = crate::mem::zeroed();
assert_eq!(libc::pthread_stackseg_np(libc::pthread_self(), &mut current_stack), 0);
let stack_ptr = current_stack.ss_sp;
let stackaddr = if libc::pthread_main_np() == 1 {
// main thread
current_stack.ss_sp as usize - current_stack.ss_size + PAGE_SIZE.load(Ordering::Relaxed)
stack_ptr.addr() - current_stack.ss_size + PAGE_SIZE.load(Ordering::Relaxed)
} else {
// new thread
current_stack.ss_sp as usize - current_stack.ss_size
stack_ptr.addr() - current_stack.ss_size
};
Some(stackaddr as *mut libc::c_void)
Some(stack_ptr.with_addr(stackaddr))
}
#[cfg(any(
@@ -557,7 +557,8 @@ pub mod guard {
unsafe fn get_stack_start_aligned() -> Option<*mut libc::c_void> {
let page_size = PAGE_SIZE.load(Ordering::Relaxed);
assert!(page_size != 0);
let stackaddr = get_stack_start()?;
let stackptr = get_stack_start()?;
let stackaddr = stackptr.addr();
// Ensure stackaddr is page aligned! A parent process might
// have reset RLIMIT_STACK to be non-page aligned. The
@@ -565,11 +566,11 @@ pub mod guard {
// stackaddr < stackaddr + stacksize, so if stackaddr is not
// page-aligned, calculate the fix such that stackaddr <
// new_page_aligned_stackaddr < stackaddr + stacksize
let remainder = (stackaddr as usize) % page_size;
let remainder = stackaddr % page_size;
Some(if remainder == 0 {
stackaddr
stackptr
} else {
((stackaddr as usize) + page_size - remainder) as *mut libc::c_void
stackptr.with_addr(stackaddr + page_size - remainder)
})
}
@@ -588,8 +589,8 @@ pub mod guard {
// Instead, we'll just note where we expect rlimit to start
// faulting, so our handler can report "stack overflow", and
// trust that the kernel's own stack guard will work.
let stackaddr = get_stack_start_aligned()?;
let stackaddr = stackaddr as usize;
let stackptr = get_stack_start_aligned()?;
let stackaddr = stackptr.addr();
Some(stackaddr - page_size..stackaddr)
} else if cfg!(all(target_os = "linux", target_env = "musl")) {
// For the main thread, the musl's pthread_attr_getstack
@@ -602,8 +603,8 @@ pub mod guard {
// at the bottom. If we try to remap the bottom of the stack
// ourselves, FreeBSD's guard page moves upwards. So we'll just use
// the builtin guard page.
let stackaddr = get_stack_start_aligned()?;
let guardaddr = stackaddr as usize;
let stackptr = get_stack_start_aligned()?;
let guardaddr = stackptr.addr();
// Technically the number of guard pages is tunable and controlled
// by the security.bsd.stack_guard_page sysctl, but there are
// few reasons to change it from the default. The default value has
@@ -620,25 +621,25 @@ pub mod guard {
// than the initial mmap() used, so we mmap() here with
// read/write permissions and only then mprotect() it to
// no permissions at all. See issue #50313.
let stackaddr = get_stack_start_aligned()?;
let stackptr = get_stack_start_aligned()?;
let result = mmap(
stackaddr,
stackptr,
page_size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON | MAP_FIXED,
-1,
0,
);
if result != stackaddr || result == MAP_FAILED {
if result != stackptr || result == MAP_FAILED {
panic!("failed to allocate a guard page: {}", io::Error::last_os_error());
}
let result = mprotect(stackaddr, page_size, PROT_NONE);
let result = mprotect(stackptr, page_size, PROT_NONE);
if result != 0 {
panic!("failed to protect the guard page: {}", io::Error::last_os_error());
}
let guardaddr = stackaddr as usize;
let guardaddr = stackptr.addr();
Some(guardaddr..guardaddr + page_size)
}
@@ -646,7 +647,8 @@ pub mod guard {
#[cfg(any(target_os = "macos", target_os = "openbsd", target_os = "solaris"))]
pub unsafe fn current() -> Option<Guard> {
let stackaddr = get_stack_start()? as usize;
let stackptr = get_stack_start()?;
let stackaddr = stackptr.addr();
Some(stackaddr - PAGE_SIZE.load(Ordering::Relaxed)..stackaddr)
}
@@ -679,11 +681,11 @@ pub mod guard {
panic!("there is no guard page");
}
}
let mut stackaddr = crate::ptr::null_mut();
let mut stackptr = crate::ptr::null_mut::<libc::c_void>();
let mut size = 0;
assert_eq!(libc::pthread_attr_getstack(&attr, &mut stackaddr, &mut size), 0);
assert_eq!(libc::pthread_attr_getstack(&attr, &mut stackptr, &mut size), 0);
let stackaddr = stackaddr as usize;
let stackaddr = stackptr.addr();
ret = if cfg!(any(target_os = "freebsd", target_os = "netbsd")) {
Some(stackaddr - guardsize..stackaddr)
} else if cfg!(all(target_os = "linux", target_env = "musl")) {

View File

@@ -22,9 +22,10 @@
// that, we'll just allow that some unix targets don't use this module at all.
#![allow(dead_code, unused_macros)]
use crate::ffi::CStr;
use crate::ffi::{c_void, CStr};
use crate::marker::PhantomData;
use crate::mem;
use crate::ptr;
use crate::sync::atomic::{self, AtomicUsize, Ordering};
// We can use true weak linkage on ELF targets.
@@ -129,25 +130,25 @@ impl<F> DlsymWeak<F> {
// Cold because it should only happen during first-time initialization.
#[cold]
unsafe fn initialize(&self) -> Option<F> {
assert_eq!(mem::size_of::<F>(), mem::size_of::<usize>());
assert_eq!(mem::size_of::<F>(), mem::size_of::<*mut ()>());
let val = fetch(self.name);
// This synchronizes with the acquire fence in `get`.
self.addr.store(val, Ordering::Release);
self.addr.store(val.addr(), Ordering::Release);
match val {
match val.addr() {
0 => None,
addr => Some(mem::transmute_copy::<usize, F>(&addr)),
_ => Some(mem::transmute_copy::<*mut c_void, F>(&val)),
}
}
}
unsafe fn fetch(name: &str) -> usize {
unsafe fn fetch(name: &str) -> *mut c_void {
let name = match CStr::from_bytes_with_nul(name.as_bytes()) {
Ok(cstr) => cstr,
Err(..) => return 0,
Err(..) => return ptr::null_mut(),
};
libc::dlsym(libc::RTLD_DEFAULT, name.as_ptr()) as usize
libc::dlsym(libc::RTLD_DEFAULT, name.as_ptr())
}
#[cfg(not(any(target_os = "linux", target_os = "android")))]