Auto merge of #27174 - Gankro:rc-sat, r=alexcrichton

See https://internals.rust-lang.org/t/rc-is-unsafe-mostly-on-32-bit-targets-due-to-overflow/2120 for detailed discussion of this problem.
Merged by bors on 2015-07-30 05:54:55 +00:00 in commit db2af71d59.
2 changed files with 42 additions and 7 deletions
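For context, a minimal standalone sketch (a hypothetical demo using today's stable std API, not part of this commit) of the failure mode discussed in the linked thread: `mem::forget` discards a clone without running its destructor, so the strong count is incremented but never decremented. Looped enough times on a 32-bit target the count wraps around, after which a later drop frees the allocation while live Arcs still point at it. The loop below runs only 16 times, just to show the shape of the problem.

use std::mem;
use std::sync::Arc;

fn main() {
    let data = Arc::new(0u8);
    // Each forgotten clone bumps the strong count without a matching
    // decrement. A real overflow would need ~2^32 iterations on a
    // 32-bit target; we stop at 16 to keep the demo harmless.
    for _ in 0..16 {
        mem::forget(data.clone());
    }
    // With this commit applied, the count saturates near isize::MAX
    // and the process aborts instead of wrapping to zero.
    println!("strong count: {}", Arc::strong_count(&data));
}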

src/liballoc/arc.rs

@@ -78,16 +78,18 @@ use core::atomic::Ordering::{Relaxed, Release, Acquire, SeqCst};
use core::fmt;
use core::cmp::Ordering;
use core::mem::{align_of_val, size_of_val};
use core::intrinsics::drop_in_place;
use core::intrinsics::{drop_in_place, abort};
use core::mem;
use core::nonzero::NonZero;
use core::ops::{Deref, CoerceUnsized};
use core::ptr;
use core::marker::Unsize;
use core::hash::{Hash, Hasher};
use core::usize;
use core::{usize, isize};
use heap::deallocate;
const MAX_REFCOUNT: usize = (isize::MAX) as usize;
/// An atomically reference counted wrapper for shared state.
///
/// # Examples
@@ -312,7 +314,21 @@ impl<T: ?Sized> Clone for Arc<T> {
// another must already provide any required synchronization.
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
self.inner().strong.fetch_add(1, Relaxed);
let old_size = self.inner().strong.fetch_add(1, Relaxed);
// However, we need to guard against massive refcounts in case someone
// is `mem::forget`ing Arcs. If we don't do this the count can overflow
// and users will use-after-free. We racily saturate to `isize::MAX` on
// the assumption that there aren't ~2 billion threads incrementing
// the reference count at once. This branch will never be taken in
// any realistic program.
//
// We abort because such a program is incredibly degenerate, and we
// don't care to support it.
if old_size > MAX_REFCOUNT {
unsafe { abort(); }
}
Arc { _ptr: self._ptr }
}
}
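The guarded increment above can be sketched outside liballoc with std's atomics; in this sketch `std::process::abort` stands in for the `core::intrinsics::abort` intrinsic used in the diff, and `MAX_REFCOUNT` mirrors the constant defined earlier in the file.

use std::process;
use std::sync::atomic::{AtomicUsize, Ordering};

const MAX_REFCOUNT: usize = isize::MAX as usize;

// Guarded increment: fetch_add first, then check the value we saw.
// The check is deliberately racy; for the count to keep climbing all
// the way to usize overflow, on the order of isize::MAX threads would
// each have to be paused between the fetch_add and the abort, which
// no realistic program can arrange.
fn inc_ref(count: &AtomicUsize) {
    let old_size = count.fetch_add(1, Ordering::Relaxed);
    if old_size > MAX_REFCOUNT {
        process::abort();
    }
}

fn main() {
    let count = AtomicUsize::new(1);
    inc_ref(&count);
    assert_eq!(count.load(Ordering::Relaxed), 2);
}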
@@ -617,7 +633,13 @@ impl<T: ?Sized> Clone for Weak<T> {
// fetch_add (ignoring the lock) because the weak count is only locked
// when there are *no other* weak pointers in existence. (So we can't be
// running this code in that case).
self.inner().weak.fetch_add(1, Relaxed);
let old_size = self.inner().weak.fetch_add(1, Relaxed);
// See comments in Arc::clone() for why we do this (for mem::forget).
if old_size > MAX_REFCOUNT {
unsafe { abort(); }
}
return Weak { _ptr: self._ptr }
}
}

src/liballoc/rc.rs

@@ -161,7 +161,7 @@ use core::cell::Cell;
use core::cmp::Ordering;
use core::fmt;
use core::hash::{Hasher, Hash};
use core::intrinsics::{assume, drop_in_place};
use core::intrinsics::{assume, drop_in_place, abort};
use core::marker::{self, Unsize};
use core::mem::{self, align_of, size_of, align_of_val, size_of_val, forget};
use core::nonzero::NonZero;
@@ -858,6 +858,15 @@ impl<T: ?Sized+fmt::Debug> fmt::Debug for Weak<T> {
}
}
// NOTE: We use checked_add here to deal with mem::forget safety. In particular
// if you mem::forget Rcs (or Weaks), the ref-count can overflow, and then
// you can free the allocation while outstanding Rcs (or Weaks) exist.
// We abort because this is such a degenerate scenario that we don't care about
// what happens -- no real program should ever experience this.
//
// This should have negligible overhead since you don't actually need to
// clone these much in Rust thanks to ownership and move-semantics.
#[doc(hidden)]
trait RcBoxPtr<T: ?Sized> {
fn inner(&self) -> &RcBox<T>;
@@ -866,7 +875,9 @@ trait RcBoxPtr<T: ?Sized> {
fn strong(&self) -> usize { self.inner().strong.get() }
#[inline]
fn inc_strong(&self) { self.inner().strong.set(self.strong() + 1); }
fn inc_strong(&self) {
self.inner().strong.set(self.strong().checked_add(1).unwrap_or_else(|| unsafe { abort() }));
}
#[inline]
fn dec_strong(&self) { self.inner().strong.set(self.strong() - 1); }
@@ -875,7 +886,9 @@ trait RcBoxPtr<T: ?Sized> {
fn weak(&self) -> usize { self.inner().weak.get() }
#[inline]
fn inc_weak(&self) { self.inner().weak.set(self.weak() + 1); }
fn inc_weak(&self) {
self.inner().weak.set(self.weak().checked_add(1).unwrap_or_else(|| unsafe { abort() }));
}
#[inline]
fn dec_weak(&self) { self.inner().weak.set(self.weak() - 1); }
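The non-atomic Rc path can be sketched the same way: the count lives in a Cell<usize>, checked_add returns None on overflow, and the fallback aborts instead of wrapping. This is a standalone sketch, again with `std::process::abort` standing in for the `core::intrinsics::abort` intrinsic available inside liballoc.

use std::cell::Cell;
use std::process;

// Mirror of inc_strong/inc_weak above for a plain Cell<usize> counter:
// checked_add yields None on overflow, and abort() (which returns `!`)
// satisfies the closure's usize return type.
fn inc_count(count: &Cell<usize>) {
    let bumped = count
        .get()
        .checked_add(1)
        .unwrap_or_else(|| process::abort());
    count.set(bumped);
}

fn main() {
    let strong = Cell::new(1usize);
    inc_count(&strong);
    assert_eq!(strong.get(), 2);
}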