Auto merge of #22532 - pnkfelix:arith-overflow, r=pnkfelix,eddyb

Rebase and follow-through on work done by @cmr and @aatch.

Implements most of rust-lang/rfcs#560. Errors that the new checks uncovered while building were fixed.

The checks for division, remainder and bit-shifting have not been implemented yet.

See also PR #20795

cc @Aatch ; cc @nikomatsakis
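
For context (editor's note, not part of the diff): under RFC 560 semantics the ordinary arithmetic operators trap on overflow when checks are enabled (debug builds, or the `force_overflow_checks` debugging option this PR adds), while wraparound must be requested explicitly through the `wrapping_*` / `overflowing_*` operations and the `Wrapping<T>` newtype introduced here. A minimal sketch using today's stable spellings of that API:

```rust
fn main() {
    let a: u8 = 200;
    let b: u8 = 100;

    // With overflow checks on, `a + b` panics: 300 does not fit in a u8.
    // let _boom = a + b;

    // Wraparound must now be requested explicitly:
    assert_eq!(a.wrapping_add(b), 44); // (200 + 100) mod 256
}
```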
Merged by bors, 2015-03-03 14:18:03 +00:00, in commit 14f0942a49.
57 changed files with 1189 additions and 375 deletions


@ -818,11 +818,11 @@ impl BitVec {
let full_value = if value { !0 } else { 0 };
// Correct the old tail word, setting or clearing formerly unused bits
let old_last_word = blocks_for_bits(self.nbits) - 1;
let num_cur_blocks = blocks_for_bits(self.nbits);
if self.nbits % u32::BITS as usize > 0 {
let mask = mask_for_bits(self.nbits);
if value {
self.storage[old_last_word] |= !mask;
self.storage[num_cur_blocks - 1] |= !mask;
} else {
// Extra bits are already zero by invariant.
}
@ -830,7 +830,7 @@ impl BitVec {
// Fill in words after the old tail word
let stop_idx = cmp::min(self.storage.len(), new_nblocks);
for idx in old_last_word + 1..stop_idx {
for idx in num_cur_blocks..stop_idx {
self.storage[idx] = full_value;
}
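
A note on the hunk above (editor's illustration, not diff content): `blocks_for_bits(self.nbits) - 1` underflows for an empty bit vector, since `blocks_for_bits(0) == 0`, and the new overflow checks turn that into a panic. The rewrite keeps the unsubtracted block count and only forms `num_cur_blocks - 1` inside the `nbits % u32::BITS > 0` branch, where at least one block must exist. A standalone sketch of the hazard, with a hypothetical `blocks_for_bits` matching the helper's intent:

```rust
fn blocks_for_bits(bits: usize) -> usize {
    (bits + 31) / 32 // one u32 block per 32 bits, rounded up
}

fn main() {
    let nbits = 0usize; // an empty BitVec
    let num_cur_blocks = blocks_for_bits(nbits);
    // Old code: `blocks_for_bits(nbits) - 1` == `0 - 1`, a panic under overflow checks.
    if nbits % 32 > 0 {
        let last = num_cur_blocks - 1; // safe here: num_cur_blocks >= 1
        println!("last block index: {}", last);
    } else {
        println!("no partial tail block to fix up");
    }
}
```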


@ -25,7 +25,7 @@ use core::fmt::Debug;
use core::hash::{Hash, Hasher};
use core::iter::{Map, FromIterator, IntoIterator};
use core::ops::{Index, IndexMut};
use core::{iter, fmt, mem};
use core::{iter, fmt, mem, usize};
use Bound::{self, Included, Excluded, Unbounded};
use borrow::Borrow;
@ -1467,7 +1467,7 @@ macro_rules! range_impl {
$Range {
inner: AbsIter {
traversals: traversals,
size: 0, // unused
size: usize::MAX, // unused
}
}
}


@ -1215,7 +1215,8 @@ impl<K, V> Node<K, V> {
ptr::copy(
self.edges_mut().as_mut_ptr().offset(index as isize),
self.edges().as_ptr().offset(index as isize + 1),
self.len() - index + 1
// index can be == len+1, so do the +1 first to avoid underflow.
(self.len() + 1) - index
);
edge


@ -96,6 +96,7 @@ use core::iter::{range_step, MultiplicativeIterator};
use core::marker::Sized;
use core::mem::size_of;
use core::mem;
use core::num::wrapping::WrappingOps;
use core::ops::FnMut;
use core::option::Option::{self, Some, None};
use core::ptr::PtrExt;
@ -1209,10 +1210,14 @@ struct SizeDirection {
impl Iterator for ElementSwaps {
type Item = (usize, usize);
#[inline]
// #[inline]
fn next(&mut self) -> Option<(usize, usize)> {
fn new_pos_wrapping(i: usize, s: Direction) -> usize {
i.wrapping_add(match s { Pos => 1, Neg => -1 })
}
fn new_pos(i: usize, s: Direction) -> usize {
i + match s { Pos => 1, Neg => -1 }
match s { Pos => i + 1, Neg => i - 1 }
}
// Find the index of the largest mobile element:
@ -1220,7 +1225,7 @@ impl Iterator for ElementSwaps {
// swap should be with a smaller `size` element.
let max = self.sdir.iter().cloned().enumerate()
.filter(|&(i, sd)|
new_pos(i, sd.dir) < self.sdir.len() &&
new_pos_wrapping(i, sd.dir) < self.sdir.len() &&
self.sdir[new_pos(i, sd.dir)].size < sd.size)
.max_by(|&(_, sd)| sd.size);
match max {
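
Why only the bounds check uses the wrapping helper (editor's note): when `i == 0` and the direction is `Neg`, the candidate position `i - 1` underflows. Computing it with wrapping arithmetic yields a huge value that simply fails the `< self.sdir.len()` test, so the plain `new_pos` on the following line only runs for indices already known to be in range. A tiny sketch:

```rust
fn main() {
    let len = 4usize;
    let i = 0usize;
    // Direction::Neg asks for the element at i - 1; wrapping turns the
    // would-be underflow into usize::MAX, which the bounds check rejects.
    let candidate = i.wrapping_sub(1);
    assert!(candidate >= len); // never used as an index
}
```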


@ -26,6 +26,7 @@ use core::fmt;
use core::iter::{self, repeat, FromIterator, IntoIterator, RandomAccessIterator};
use core::mem;
use core::num::{Int, UnsignedInt};
use core::num::wrapping::WrappingOps;
use core::ops::{Index, IndexMut};
use core::ptr::{self, Unique};
use core::raw::Slice as RawSlice;
@ -120,6 +121,20 @@ impl<T> VecDeque<T> {
#[inline]
fn wrap_index(&self, idx: usize) -> usize { wrap_index(idx, self.cap) }
/// Returns the index in the underlying buffer for a given logical element
/// index + addend.
#[inline]
fn wrap_add(&self, idx: usize, addend: usize) -> usize {
wrap_index(idx.wrapping_add(addend), self.cap)
}
/// Returns the index in the underlying buffer for a given logical element
/// index - subtrahend.
#[inline]
fn wrap_sub(&self, idx: usize, subtrahend: usize) -> usize {
wrap_index(idx.wrapping_sub(subtrahend), self.cap)
}
/// Copies a contiguous block of memory len long from src to dst
#[inline]
unsafe fn copy(&self, dst: usize, src: usize, len: usize) {
@ -197,7 +212,7 @@ impl<T> VecDeque<T> {
#[stable(feature = "rust1", since = "1.0.0")]
pub fn get(&self, i: usize) -> Option<&T> {
if i < self.len() {
let idx = self.wrap_index(self.tail + i);
let idx = self.wrap_add(self.tail, i);
unsafe { Some(&*self.ptr.offset(idx as isize)) }
} else {
None
@ -227,7 +242,7 @@ impl<T> VecDeque<T> {
#[stable(feature = "rust1", since = "1.0.0")]
pub fn get_mut(&mut self, i: usize) -> Option<&mut T> {
if i < self.len() {
let idx = self.wrap_index(self.tail + i);
let idx = self.wrap_add(self.tail, i);
unsafe { Some(&mut *self.ptr.offset(idx as isize)) }
} else {
None
@ -257,8 +272,8 @@ impl<T> VecDeque<T> {
pub fn swap(&mut self, i: usize, j: usize) {
assert!(i < self.len());
assert!(j < self.len());
let ri = self.wrap_index(self.tail + i);
let rj = self.wrap_index(self.tail + j);
let ri = self.wrap_add(self.tail, i);
let rj = self.wrap_add(self.tail, j);
unsafe {
ptr::swap(self.ptr.offset(ri as isize), self.ptr.offset(rj as isize))
}
@ -427,7 +442,7 @@ impl<T> VecDeque<T> {
// [. . . o o o o o o o . . . . . . ]
// H T
// [o o . o o o o o ]
let len = self.wrap_index(self.head - target_cap);
let len = self.wrap_sub(self.head, target_cap);
unsafe {
self.copy_nonoverlapping(0, target_cap, len);
}
@ -438,7 +453,7 @@ impl<T> VecDeque<T> {
// [o o o o o . . . . . . . . . o o ]
// H T
// [o o o o o . o o ]
debug_assert!(self.wrap_index(self.head - 1) < target_cap);
debug_assert!(self.wrap_sub(self.head, 1) < target_cap);
let len = self.cap - self.tail;
let new_tail = target_cap - len;
unsafe {
@ -775,7 +790,7 @@ impl<T> VecDeque<T> {
None
} else {
let tail = self.tail;
self.tail = self.wrap_index(self.tail + 1);
self.tail = self.wrap_add(self.tail, 1);
unsafe { Some(self.buffer_read(tail)) }
}
}
@ -799,7 +814,7 @@ impl<T> VecDeque<T> {
debug_assert!(!self.is_full());
}
self.tail = self.wrap_index(self.tail - 1);
self.tail = self.wrap_sub(self.tail, 1);
let tail = self.tail;
unsafe { self.buffer_write(tail, t); }
}
@ -824,7 +839,7 @@ impl<T> VecDeque<T> {
}
let head = self.head;
self.head = self.wrap_index(self.head + 1);
self.head = self.wrap_add(self.head, 1);
unsafe { self.buffer_write(head, t) }
}
@ -847,7 +862,7 @@ impl<T> VecDeque<T> {
if self.is_empty() {
None
} else {
self.head = self.wrap_index(self.head - 1);
self.head = self.wrap_sub(self.head, 1);
let head = self.head;
unsafe { Some(self.buffer_read(head)) }
}
@ -971,7 +986,7 @@ impl<T> VecDeque<T> {
// A - The element that should be after the insertion point
// M - Indicates element was moved
let idx = self.wrap_index(self.tail + i);
let idx = self.wrap_add(self.tail, i);
let distance_to_tail = i;
let distance_to_head = self.len() - i;
@ -990,7 +1005,7 @@ impl<T> VecDeque<T> {
// [A o o o o o o o . . . . . I]
//
self.tail = self.wrap_index(self.tail - 1);
self.tail = self.wrap_sub(self.tail, 1);
},
(true, true, _) => unsafe {
// contiguous, insert closer to tail:
@ -1012,7 +1027,7 @@ impl<T> VecDeque<T> {
// [o I A o o o o o . . . . . . . o]
// M M
let new_tail = self.wrap_index(self.tail - 1);
let new_tail = self.wrap_sub(self.tail, 1);
self.copy(new_tail, self.tail, 1);
// Already moved the tail, so we only copy `i - 1` elements.
@ -1031,7 +1046,7 @@ impl<T> VecDeque<T> {
// M M M
self.copy(idx + 1, idx, self.head - idx);
self.head = self.wrap_index(self.head + 1);
self.head = self.wrap_add(self.head, 1);
},
(false, true, true) => unsafe {
// discontiguous, insert closer to tail, tail section:
@ -1123,7 +1138,7 @@ impl<T> VecDeque<T> {
}
// tail might've been changed so we need to recalculate
let new_idx = self.wrap_index(self.tail + i);
let new_idx = self.wrap_add(self.tail, i);
unsafe {
self.buffer_write(new_idx, t);
}
@ -1170,7 +1185,7 @@ impl<T> VecDeque<T> {
// R - Indicates element that is being removed
// M - Indicates element was moved
let idx = self.wrap_index(self.tail + i);
let idx = self.wrap_add(self.tail, i);
let elem = unsafe {
Some(self.buffer_read(idx))
@ -1219,7 +1234,7 @@ impl<T> VecDeque<T> {
// M M
self.copy(self.tail + 1, self.tail, i);
self.tail = self.wrap_index(self.tail + 1);
self.tail = self.wrap_add(self.tail, 1);
},
(false, false, false) => unsafe {
// discontiguous, remove closer to head, head section:
@ -1265,7 +1280,7 @@ impl<T> VecDeque<T> {
self.copy(0, 1, self.head - 1);
}
self.head = self.wrap_index(self.head - 1);
self.head = self.wrap_sub(self.head, 1);
},
(false, true, false) => unsafe {
// discontiguous, remove closer to tail, head section:
@ -1286,7 +1301,7 @@ impl<T> VecDeque<T> {
// move elements from tail to end forward, excluding the last one
self.copy(self.tail + 1, self.tail, self.cap - self.tail - 1);
self.tail = self.wrap_index(self.tail + 1);
self.tail = self.wrap_add(self.tail, 1);
}
}
@ -1354,7 +1369,7 @@ impl<T> VecDeque<T> {
}
// Cleanup where the ends of the buffers are
self.head = self.wrap_index(self.head - other_len);
self.head = self.wrap_sub(self.head, other_len);
other.head = other.wrap_index(other_len);
other
@ -1429,7 +1444,7 @@ fn wrap_index(index: usize, size: usize) -> usize {
#[inline]
fn count(tail: usize, head: usize, size: usize) -> usize {
// size is always a power of 2
(head - tail) & (size - 1)
(head.wrapping_sub(tail)) & (size - 1)
}
/// `VecDeque` iterator.
@ -1461,7 +1476,7 @@ impl<'a, T> Iterator for Iter<'a, T> {
return None;
}
let tail = self.tail;
self.tail = wrap_index(self.tail + 1, self.ring.len());
self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());
unsafe { Some(self.ring.get_unchecked(tail)) }
}
@ -1479,7 +1494,7 @@ impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
if self.tail == self.head {
return None;
}
self.head = wrap_index(self.head - 1, self.ring.len());
self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());
unsafe { Some(self.ring.get_unchecked(self.head)) }
}
}
@ -1500,7 +1515,7 @@ impl<'a, T> RandomAccessIterator for Iter<'a, T> {
if j >= self.indexable() {
None
} else {
let idx = wrap_index(self.tail + j, self.ring.len());
let idx = wrap_index(self.tail.wrapping_add(j), self.ring.len());
unsafe { Some(self.ring.get_unchecked(idx)) }
}
}
@ -1524,7 +1539,7 @@ impl<'a, T> Iterator for IterMut<'a, T> {
return None;
}
let tail = self.tail;
self.tail = wrap_index(self.tail + 1, self.ring.len());
self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());
unsafe {
let elem = self.ring.get_unchecked_mut(tail);
@ -1546,7 +1561,7 @@ impl<'a, T> DoubleEndedIterator for IterMut<'a, T> {
if self.tail == self.head {
return None;
}
self.head = wrap_index(self.head - 1, self.ring.len());
self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());
unsafe {
let elem = self.ring.get_unchecked_mut(self.head);


@ -14,7 +14,7 @@
use prelude::*;
use default::Default;
use num::wrapping::WrappingOps;
use super::Hasher;
/// An implementation of SipHash 2-4.
@ -71,17 +71,17 @@ macro_rules! u8to64_le {
macro_rules! rotl {
($x:expr, $b:expr) =>
(($x << $b) | ($x >> (64 - $b)))
(($x << $b) | ($x >> (64.wrapping_sub($b))))
}
macro_rules! compress {
($v0:expr, $v1:expr, $v2:expr, $v3:expr) =>
({
$v0 += $v1; $v1 = rotl!($v1, 13); $v1 ^= $v0;
$v0 = $v0.wrapping_add($v1); $v1 = rotl!($v1, 13); $v1 ^= $v0;
$v0 = rotl!($v0, 32);
$v2 += $v3; $v3 = rotl!($v3, 16); $v3 ^= $v2;
$v0 += $v3; $v3 = rotl!($v3, 21); $v3 ^= $v0;
$v2 += $v1; $v1 = rotl!($v1, 17); $v1 ^= $v2;
$v2 = $v2.wrapping_add($v3); $v3 = rotl!($v3, 16); $v3 ^= $v2;
$v0 = $v0.wrapping_add($v3); $v3 = rotl!($v3, 21); $v3 ^= $v0;
$v2 = $v2.wrapping_add($v1); $v1 = rotl!($v1, 17); $v1 ^= $v2;
$v2 = rotl!($v2, 32);
})
}


@ -546,3 +546,14 @@ extern "rust-intrinsic" {
/// Performs checked `u64` multiplication.
pub fn u64_mul_with_overflow(x: u64, y: u64) -> (u64, bool);
}
// SNAP 880fb89
#[cfg(not(stage0))]
extern "rust-intrinsic" {
/// Returns (a + b) mod 2^N, where N is the width of T in bits.
pub fn overflowing_add<T>(a: T, b: T) -> T;
/// Returns (a - b) mod 2^N, where N is the width of T in bits.
pub fn overflowing_sub<T>(a: T, b: T) -> T;
/// Returns (a * b) mod 2^N, where N is the width of T in bits.
pub fn overflowing_mul<T>(a: T, b: T) -> T;
}
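
The doc comments above define the intrinsics as arithmetic modulo 2^N. For a concrete width (editor's illustration via the wrapping methods this PR layers on top of them; modern inherent-method spellings assumed):

```rust
fn main() {
    // N = 8 for u8, so every result is reduced mod 256.
    assert_eq!(250u8.wrapping_add(10), 4);  // (250 + 10) mod 256
    assert_eq!(3u8.wrapping_sub(5), 254);   // (3 - 5) mod 256
    assert_eq!(16u8.wrapping_mul(17), 16);  // 272 mod 256
}
```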


@ -728,10 +728,11 @@ pub trait IteratorExt: Iterator + Sized {
P: FnMut(Self::Item) -> bool,
Self: ExactSizeIterator + DoubleEndedIterator
{
let mut i = self.len() - 1;
let mut i = self.len();
while let Some(v) = self.next_back() {
if predicate(v) {
return Some(i);
return Some(i - 1);
}
i -= 1;
}
@ -1129,7 +1130,11 @@ impl<I> RandomAccessIterator for Rev<I> where I: DoubleEndedIterator + RandomAcc
#[inline]
fn idx(&mut self, index: usize) -> Option<<I as Iterator>::Item> {
let amt = self.indexable();
self.iter.idx(amt - index - 1)
if amt > index {
self.iter.idx(amt - index - 1)
} else {
None
}
}
}
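
Both fixes above keep index arithmetic out of the underflow zone: `rposition` used to start from `self.len() - 1`, which is `0 - 1` for an empty iterator, and `Rev::idx` subtracted before checking bounds. A quick check of the empty-iterator case (editor's sketch using the stable `Iterator::rposition`):

```rust
fn main() {
    let empty: Vec<i32> = Vec::new();
    // The old code computed `len() - 1` up front; the rewrite only
    // subtracts after an element has actually been produced.
    assert_eq!(empty.iter().rposition(|&x| x > 0), None);

    let v = [1, 2, 3, 2];
    assert_eq!(v.iter().rposition(|&x| x == 2), Some(3));
}
```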


@ -69,6 +69,7 @@
#![feature(unboxed_closures)]
#![feature(rustc_attrs)]
#![feature(optin_builtin_traits)]
#![feature(concat_idents)]
#[macro_use]
mod macros;


@ -15,6 +15,8 @@
#![stable(feature = "rust1", since = "1.0.0")]
#![allow(missing_docs)]
use self::wrapping::{OverflowingOps, WrappingOps};
use char::CharExt;
use clone::Clone;
use cmp::{PartialEq, Eq, PartialOrd, Ord};
@ -30,6 +32,9 @@ use option::Option::{self, Some, None};
use result::Result::{self, Ok, Err};
use str::{FromStr, StrExt};
#[unstable(feature = "core", reason = "may be removed or relocated")]
pub mod wrapping;
/// A built-in signed or unsigned integer.
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Int
@ -48,6 +53,8 @@ pub trait Int
+ BitXor<Output=Self>
+ Shl<uint, Output=Self>
+ Shr<uint, Output=Self>
+ WrappingOps
+ OverflowingOps
{
/// Returns the `0` value of this integer type.
// FIXME (#5527): Should be an associated constant
@ -376,11 +383,23 @@ pub trait Int
let mut base = self;
let mut acc: Self = Int::one();
let mut prev_base = self;
let mut base_oflo = false;
while exp > 0 {
if (exp & 1) == 1 {
acc = acc * base;
if base_oflo {
// ensure overflow occurs in the same manner it
// would have otherwise (i.e. signal any exception
// it would have otherwise).
acc = acc * (prev_base * prev_base);
} else {
acc = acc * base;
}
}
base = base * base;
prev_base = base;
let (new_base, new_base_oflo) = base.overflowing_mul(base);
base = new_base;
base_oflo = new_base_oflo;
exp /= 2;
}
acc
@ -691,12 +710,12 @@ signed_int_impl! { int }
/// A built-in unsigned integer.
#[stable(feature = "rust1", since = "1.0.0")]
pub trait UnsignedInt: Int {
pub trait UnsignedInt: Int + WrappingOps {
/// Returns `true` iff `self == 2^k` for some `k`.
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
fn is_power_of_two(self) -> bool {
(self - Int::one()) & self == Int::zero() && !(self == Int::zero())
(self.wrapping_sub(Int::one())) & self == Int::zero() && !(self == Int::zero())
}
/// Returns the smallest power of two greater than or equal to `self`.
@ -706,7 +725,7 @@ pub trait UnsignedInt: Int {
fn next_power_of_two(self) -> Self {
let bits = size_of::<Self>() * 8;
let one: Self = Int::one();
one << ((bits - (self - one).leading_zeros() as usize) % bits)
one << ((bits - self.wrapping_sub(one).leading_zeros() as usize) % bits)
}
/// Returns the smallest power of two greater than or equal to `n`. If the


@ -20,6 +20,6 @@ pub const BYTES : u32 = ($bits / 8);
#[stable(feature = "rust1", since = "1.0.0")]
pub const MIN: $T = 0 as $T;
#[stable(feature = "rust1", since = "1.0.0")]
pub const MAX: $T = 0 as $T - 1 as $T;
pub const MAX: $T = !0 as $T;
) }
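
Editor's note on the constant above: the old spelling `0 as $T - 1 as $T` relies on an overflowing subtraction to produce the all-ones pattern, which the new checks reject; `!0 as $T` is the same value with no arithmetic at all. A sanity check in modern syntax:

```rust
fn main() {
    assert_eq!(!0u16, u16::MAX); // 0xFFFF
    assert_eq!(!0u64, u64::MAX);
}
```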

src/libcore/num/wrapping.rs (new file, 300 lines added)

@ -0,0 +1,300 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(missing_docs)]
use ops::*;
#[cfg(not(stage0))]
use intrinsics::{overflowing_add, overflowing_sub, overflowing_mul};
use intrinsics::{i8_add_with_overflow, u8_add_with_overflow};
use intrinsics::{i16_add_with_overflow, u16_add_with_overflow};
use intrinsics::{i32_add_with_overflow, u32_add_with_overflow};
use intrinsics::{i64_add_with_overflow, u64_add_with_overflow};
use intrinsics::{i8_sub_with_overflow, u8_sub_with_overflow};
use intrinsics::{i16_sub_with_overflow, u16_sub_with_overflow};
use intrinsics::{i32_sub_with_overflow, u32_sub_with_overflow};
use intrinsics::{i64_sub_with_overflow, u64_sub_with_overflow};
use intrinsics::{i8_mul_with_overflow, u8_mul_with_overflow};
use intrinsics::{i16_mul_with_overflow, u16_mul_with_overflow};
use intrinsics::{i32_mul_with_overflow, u32_mul_with_overflow};
use intrinsics::{i64_mul_with_overflow, u64_mul_with_overflow};
pub trait WrappingOps {
fn wrapping_add(self, rhs: Self) -> Self;
fn wrapping_sub(self, rhs: Self) -> Self;
fn wrapping_mul(self, rhs: Self) -> Self;
}
#[unstable(feature = "core", reason = "may be removed, renamed, or relocated")]
pub trait OverflowingOps {
fn overflowing_add(self, rhs: Self) -> (Self, bool);
fn overflowing_sub(self, rhs: Self) -> (Self, bool);
fn overflowing_mul(self, rhs: Self) -> (Self, bool);
}
#[cfg(not(stage0))]
macro_rules! wrapping_impl {
($($t:ty)*) => ($(
impl WrappingOps for $t {
#[inline(always)]
fn wrapping_add(self, rhs: $t) -> $t {
unsafe {
overflowing_add(self, rhs)
}
}
#[inline(always)]
fn wrapping_sub(self, rhs: $t) -> $t {
unsafe {
overflowing_sub(self, rhs)
}
}
#[inline(always)]
fn wrapping_mul(self, rhs: $t) -> $t {
unsafe {
overflowing_mul(self, rhs)
}
}
}
)*)
}
#[cfg(stage0)]
macro_rules! wrapping_impl {
($($t:ty)*) => ($(
impl WrappingOps for $t {
#[inline(always)]
fn wrapping_add(self, rhs: $t) -> $t {
self + rhs
}
#[inline(always)]
fn wrapping_sub(self, rhs: $t) -> $t {
self - rhs
}
#[inline(always)]
fn wrapping_mul(self, rhs: $t) -> $t {
self * rhs
}
}
)*)
}
wrapping_impl! { uint u8 u16 u32 u64 int i8 i16 i32 i64 }
#[unstable(feature = "core", reason = "may be removed, renamed, or relocated")]
#[derive(PartialEq,Eq,PartialOrd,Ord,Clone,Copy)]
pub struct Wrapping<T>(pub T);
impl<T:WrappingOps> Add for Wrapping<T> {
type Output = Wrapping<T>;
#[inline(always)]
fn add(self, other: Wrapping<T>) -> Wrapping<T> {
Wrapping(self.0.wrapping_add(other.0))
}
}
impl<T:WrappingOps> Sub for Wrapping<T> {
type Output = Wrapping<T>;
#[inline(always)]
fn sub(self, other: Wrapping<T>) -> Wrapping<T> {
Wrapping(self.0.wrapping_sub(other.0))
}
}
impl<T:WrappingOps> Mul for Wrapping<T> {
type Output = Wrapping<T>;
#[inline(always)]
fn mul(self, other: Wrapping<T>) -> Wrapping<T> {
Wrapping(self.0.wrapping_mul(other.0))
}
}
impl<T:WrappingOps+Not<Output=T>> Not for Wrapping<T> {
type Output = Wrapping<T>;
fn not(self) -> Wrapping<T> {
Wrapping(!self.0)
}
}
impl<T:WrappingOps+BitXor<Output=T>> BitXor for Wrapping<T> {
type Output = Wrapping<T>;
#[inline(always)]
fn bitxor(self, other: Wrapping<T>) -> Wrapping<T> {
Wrapping(self.0 ^ other.0)
}
}
impl<T:WrappingOps+BitOr<Output=T>> BitOr for Wrapping<T> {
type Output = Wrapping<T>;
#[inline(always)]
fn bitor(self, other: Wrapping<T>) -> Wrapping<T> {
Wrapping(self.0 | other.0)
}
}
impl<T:WrappingOps+BitAnd<Output=T>> BitAnd for Wrapping<T> {
type Output = Wrapping<T>;
#[inline(always)]
fn bitand(self, other: Wrapping<T>) -> Wrapping<T> {
Wrapping(self.0 & other.0)
}
}
impl<T:WrappingOps+Shl<uint,Output=T>> Shl<uint> for Wrapping<T> {
type Output = Wrapping<T>;
#[inline(always)]
fn shl(self, other: uint) -> Wrapping<T> {
Wrapping(self.0 << other)
}
}
impl<T:WrappingOps+Shr<uint,Output=T>> Shr<uint> for Wrapping<T> {
type Output = Wrapping<T>;
#[inline(always)]
fn shr(self, other: uint) -> Wrapping<T> {
Wrapping(self.0 >> other)
}
}
macro_rules! overflowing_impl {
($($t:ident)*) => ($(
impl OverflowingOps for $t {
#[inline(always)]
fn overflowing_add(self, rhs: $t) -> ($t, bool) {
unsafe {
concat_idents!($t, _add_with_overflow)(self, rhs)
}
}
#[inline(always)]
fn overflowing_sub(self, rhs: $t) -> ($t, bool) {
unsafe {
concat_idents!($t, _sub_with_overflow)(self, rhs)
}
}
#[inline(always)]
fn overflowing_mul(self, rhs: $t) -> ($t, bool) {
unsafe {
concat_idents!($t, _mul_with_overflow)(self, rhs)
}
}
}
)*)
}
overflowing_impl! { u8 u16 u32 u64 i8 i16 i32 i64 }
#[cfg(target_pointer_width = "64")]
impl OverflowingOps for usize {
#[inline(always)]
fn overflowing_add(self, rhs: usize) -> (usize, bool) {
unsafe {
let res = u64_add_with_overflow(self as u64, rhs as u64);
(res.0 as usize, res.1)
}
}
#[inline(always)]
fn overflowing_sub(self, rhs: usize) -> (usize, bool) {
unsafe {
let res = u64_sub_with_overflow(self as u64, rhs as u64);
(res.0 as usize, res.1)
}
}
#[inline(always)]
fn overflowing_mul(self, rhs: usize) -> (usize, bool) {
unsafe {
let res = u64_mul_with_overflow(self as u64, rhs as u64);
(res.0 as usize, res.1)
}
}
}
#[cfg(target_pointer_width = "32")]
impl OverflowingOps for usize {
#[inline(always)]
fn overflowing_add(self, rhs: usize) -> (usize, bool) {
unsafe {
let res = u32_add_with_overflow(self as u32, rhs as u32);
(res.0 as usize, res.1)
}
}
#[inline(always)]
fn overflowing_sub(self, rhs: usize) -> (usize, bool) {
unsafe {
let res = u32_sub_with_overflow(self as u32, rhs as u32);
(res.0 as usize, res.1)
}
}
#[inline(always)]
fn overflowing_mul(self, rhs: usize) -> (usize, bool) {
unsafe {
let res = u32_mul_with_overflow(self as u32, rhs as u32);
(res.0 as usize, res.1)
}
}
}
#[cfg(target_pointer_width = "64")]
impl OverflowingOps for isize {
#[inline(always)]
fn overflowing_add(self, rhs: isize) -> (isize, bool) {
unsafe {
let res = i64_add_with_overflow(self as i64, rhs as i64);
(res.0 as isize, res.1)
}
}
#[inline(always)]
fn overflowing_sub(self, rhs: isize) -> (isize, bool) {
unsafe {
let res = i64_sub_with_overflow(self as i64, rhs as i64);
(res.0 as isize, res.1)
}
}
#[inline(always)]
fn overflowing_mul(self, rhs: isize) -> (isize, bool) {
unsafe {
let res = i64_mul_with_overflow(self as i64, rhs as i64);
(res.0 as isize, res.1)
}
}
}
#[cfg(target_pointer_width = "32")]
impl OverflowingOps for isize {
#[inline(always)]
fn overflowing_add(self, rhs: isize) -> (isize, bool) {
unsafe {
let res = i32_add_with_overflow(self as i32, rhs as i32);
(res.0 as isize, res.1)
}
}
#[inline(always)]
fn overflowing_sub(self, rhs: isize) -> (isize, bool) {
unsafe {
let res = i32_sub_with_overflow(self as i32, rhs as i32);
(res.0 as isize, res.1)
}
}
#[inline(always)]
fn overflowing_mul(self, rhs: isize) -> (isize, bool) {
unsafe {
let res = i32_mul_with_overflow(self as i32, rhs as i32);
(res.0 as isize, res.1)
}
}
}
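
How the new module is meant to be consumed (editor's sketch): `WrappingOps` supplies the three `wrapping_*` methods, `OverflowingOps` returns a `(result, overflowed)` pair, and the `Wrapping<T>` newtype makes every `+`, `-`, `*` in an expression wrap. Shown with today's stable equivalents; at the time of this commit the traits were imported from `core::num::wrapping`:

```rust
use std::num::Wrapping;

fn main() {
    // Method form (WrappingOps here; inherent methods today).
    assert_eq!(u32::MAX.wrapping_add(1), 0);

    // Pair form (OverflowingOps here).
    assert_eq!(i8::MIN.overflowing_sub(1), (i8::MAX, true));

    // Newtype form: the whole expression wraps, as used by the ISAAC RNG further below.
    let h = Wrapping(0x9e3779b9u32) + Wrapping(0x7f4a7c15);
    assert_eq!(h.0, 0x9e3779b9u32.wrapping_add(0x7f4a7c15));
}
```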


@ -841,6 +841,7 @@ impl TwoWaySearcher {
#[inline]
#[allow(dead_code)]
fn maximal_suffix(arr: &[u8], reversed: bool) -> (usize, usize) {
use num::wrapping::WrappingOps;
let mut left = -1; // Corresponds to i in the paper
let mut right = 0; // Corresponds to j in the paper
let mut offset = 1; // Corresponds to k in the paper
@ -850,17 +851,17 @@ impl TwoWaySearcher {
let a;
let b;
if reversed {
a = arr[left + offset];
a = arr[left.wrapping_add(offset)];
b = arr[right + offset];
} else {
a = arr[right + offset];
b = arr[left + offset];
b = arr[left.wrapping_add(offset)];
}
if a < b {
// Suffix is smaller, period is entire prefix so far.
right += offset;
offset = 1;
period = right - left;
period = right.wrapping_sub(left);
} else if a == b {
// Advance through repetition of the current period.
if offset == period {
@ -877,7 +878,7 @@ impl TwoWaySearcher {
period = 1;
}
}
(left + 1, period)
(left.wrapping_add(1), period)
}
}


@ -92,7 +92,7 @@ mod test {
assert_eq!("127".parse::<i8>().ok(), Some(i8_val));
assert_eq!("128".parse::<i8>().ok(), None);
i8_val += 1 as i8;
i8_val = i8_val.wrapping_add(1);
assert_eq!("-128".parse::<i8>().ok(), Some(i8_val));
assert_eq!("-129".parse::<i8>().ok(), None);
@ -100,7 +100,7 @@ mod test {
assert_eq!("32767".parse::<i16>().ok(), Some(i16_val));
assert_eq!("32768".parse::<i16>().ok(), None);
i16_val += 1 as i16;
i16_val = i16_val.wrapping_add(1);
assert_eq!("-32768".parse::<i16>().ok(), Some(i16_val));
assert_eq!("-32769".parse::<i16>().ok(), None);
@ -108,7 +108,7 @@ mod test {
assert_eq!("2147483647".parse::<i32>().ok(), Some(i32_val));
assert_eq!("2147483648".parse::<i32>().ok(), None);
i32_val += 1 as i32;
i32_val = i32_val.wrapping_add(1);
assert_eq!("-2147483648".parse::<i32>().ok(), Some(i32_val));
assert_eq!("-2147483649".parse::<i32>().ok(), None);
@ -116,7 +116,7 @@ mod test {
assert_eq!("9223372036854775807".parse::<i64>().ok(), Some(i64_val));
assert_eq!("9223372036854775808".parse::<i64>().ok(), None);
i64_val += 1 as i64;
i64_val = i64_val.wrapping_add(1);
assert_eq!("-9223372036854775808".parse::<i64>().ok(), Some(i64_val));
assert_eq!("-9223372036854775809".parse::<i64>().ok(), None);
}


@ -12,6 +12,7 @@
use core::prelude::*;
use core::num::Int;
use core::num::wrapping::WrappingOps;
use {Rng, SeedableRng, Rand};
const KEY_WORDS : uint = 8; // 8 words for the 256-bit key
@ -43,10 +44,10 @@ static EMPTY: ChaChaRng = ChaChaRng {
macro_rules! quarter_round{
($a: expr, $b: expr, $c: expr, $d: expr) => {{
$a += $b; $d ^= $a; $d = $d.rotate_left(16);
$c += $d; $b ^= $c; $b = $b.rotate_left(12);
$a += $b; $d ^= $a; $d = $d.rotate_left( 8);
$c += $d; $b ^= $c; $b = $b.rotate_left( 7);
$a = $a.wrapping_add($b); $d = $d ^ $a; $d = $d.rotate_left(16);
$c = $c.wrapping_add($d); $b = $b ^ $c; $b = $b.rotate_left(12);
$a = $a.wrapping_add($b); $d = $d ^ $a; $d = $d.rotate_left( 8);
$c = $c.wrapping_add($d); $b = $b ^ $c; $b = $b.rotate_left( 7);
}}
}
@ -74,7 +75,7 @@ fn core(output: &mut [u32; STATE_WORDS], input: &[u32; STATE_WORDS]) {
}
for i in 0..STATE_WORDS {
output[i] += input[i];
output[i] = output[i].wrapping_add(input[i]);
}
}


@ -14,6 +14,7 @@
use core::prelude::{PartialOrd};
use core::num::Int;
use core::num::wrapping::WrappingOps;
use Rng;
use distributions::{Sample, IndependentSample};
@ -97,7 +98,7 @@ macro_rules! integer_impl {
// bijection.
fn construct_range(low: $ty, high: $ty) -> Range<$ty> {
let range = high as $unsigned - low as $unsigned;
let range = (high as $unsigned).wrapping_sub(low as $unsigned);
let unsigned_max: $unsigned = Int::max_value();
// this is the largest number that fits into $unsigned
@ -122,7 +123,7 @@ macro_rules! integer_impl {
// be uniformly distributed)
if v < r.accept_zone as $unsigned {
// and return it, with some adjustments
return r.low + (v % r.range as $unsigned) as $ty;
return r.low.wrapping_add((v % r.range as $unsigned) as $ty);
}
}
}


@ -13,6 +13,7 @@
use core::prelude::*;
use core::slice;
use core::iter::{range_step, repeat};
use core::num::wrapping::Wrapping;
use {Rng, SeedableRng, Rand};
@ -60,7 +61,7 @@ impl IsaacRng {
/// of `rsl` as a seed, otherwise construct one algorithmically (not
/// randomly).
fn init(&mut self, use_rsl: bool) {
let mut a = 0x9e3779b9;
let mut a = Wrapping(0x9e3779b9);
let mut b = a;
let mut c = a;
let mut d = a;
@ -71,14 +72,14 @@ impl IsaacRng {
macro_rules! mix {
() => {{
a^=b<<11; d+=a; b+=c;
b^=c>>2; e+=b; c+=d;
c^=d<<8; f+=c; d+=e;
d^=e>>16; g+=d; e+=f;
e^=f<<10; h+=e; f+=g;
f^=g>>4; a+=f; g+=h;
g^=h<<8; b+=g; h+=a;
h^=a>>9; c+=h; a+=b;
a=a^(b<<11); d=d+a; b=b+c;
b=b^(c>>2); e=e+b; c=c+d;
c=c^(d<<8); f=f+c; d=d+e;
d=d^(e>>16); g=g+d; e=e+f;
e=e^(f<<10); h=h+e; f=f+g;
f=f^(g>>4); a=a+f; g=g+h;
g=g^(h<<8); b=b+g; h=h+a;
h=h^(a>>9); c=c+h; a=a+b;
}}
}
@ -90,15 +91,15 @@ impl IsaacRng {
macro_rules! memloop {
($arr:expr) => {{
for i in range_step(0, RAND_SIZE as uint, 8) {
a+=$arr[i ]; b+=$arr[i+1];
c+=$arr[i+2]; d+=$arr[i+3];
e+=$arr[i+4]; f+=$arr[i+5];
g+=$arr[i+6]; h+=$arr[i+7];
a=a+Wrapping($arr[i ]); b=b+Wrapping($arr[i+1]);
c=c+Wrapping($arr[i+2]); d=d+Wrapping($arr[i+3]);
e=e+Wrapping($arr[i+4]); f=f+Wrapping($arr[i+5]);
g=g+Wrapping($arr[i+6]); h=h+Wrapping($arr[i+7]);
mix!();
self.mem[i ]=a; self.mem[i+1]=b;
self.mem[i+2]=c; self.mem[i+3]=d;
self.mem[i+4]=e; self.mem[i+5]=f;
self.mem[i+6]=g; self.mem[i+7]=h;
self.mem[i ]=a.0; self.mem[i+1]=b.0;
self.mem[i+2]=c.0; self.mem[i+3]=d.0;
self.mem[i+4]=e.0; self.mem[i+5]=f.0;
self.mem[i+6]=g.0; self.mem[i+7]=h.0;
}
}}
}
@ -108,10 +109,10 @@ impl IsaacRng {
} else {
for i in range_step(0, RAND_SIZE as uint, 8) {
mix!();
self.mem[i ]=a; self.mem[i+1]=b;
self.mem[i+2]=c; self.mem[i+3]=d;
self.mem[i+4]=e; self.mem[i+5]=f;
self.mem[i+6]=g; self.mem[i+7]=h;
self.mem[i ]=a.0; self.mem[i+1]=b.0;
self.mem[i+2]=c.0; self.mem[i+3]=d.0;
self.mem[i+4]=e.0; self.mem[i+5]=f.0;
self.mem[i+6]=g.0; self.mem[i+7]=h.0;
}
}
@ -130,7 +131,8 @@ impl IsaacRng {
const MIDPOINT: uint = (RAND_SIZE / 2) as uint;
macro_rules! ind {
($x:expr) => ( self.mem[(($x >> 2) as uint & ((RAND_SIZE - 1) as uint))] )
($x:expr) => (Wrapping( self.mem[(($x >> 2) as uint &
((RAND_SIZE - 1) as uint))] ))
}
let r = [(0, MIDPOINT), (MIDPOINT, 0)];
@ -142,11 +144,11 @@ impl IsaacRng {
let mix = a << $shift as uint;
let x = self.mem[base + mr_offset];
a = (a ^ mix) + self.mem[base + m2_offset];
let y = ind!(x) + a + b;
self.mem[base + mr_offset] = y;
a = (Wrapping(a ^ mix) + Wrapping(self.mem[base + m2_offset])).0;
let y = ind!(x) + Wrapping(a) + Wrapping(b);
self.mem[base + mr_offset] = y.0;
b = ind!(y >> RAND_SIZE_LEN as uint) + x;
b = (ind!(y.0 >> RAND_SIZE_LEN as uint) + Wrapping(x)).0;
self.rsl[base + mr_offset] = b;
}}
}
@ -157,11 +159,11 @@ impl IsaacRng {
let mix = a >> $shift as uint;
let x = self.mem[base + mr_offset];
a = (a ^ mix) + self.mem[base + m2_offset];
let y = ind!(x) + a + b;
self.mem[base + mr_offset] = y;
a = (Wrapping(a ^ mix) + Wrapping(self.mem[base + m2_offset])).0;
let y = ind!(x) + Wrapping(a) + Wrapping(b);
self.mem[base + mr_offset] = y.0;
b = ind!(y >> RAND_SIZE_LEN as uint) + x;
b = (ind!(y.0 >> RAND_SIZE_LEN as uint) + Wrapping(x)).0;
self.rsl[base + mr_offset] = b;
}}
}
@ -304,7 +306,7 @@ impl Isaac64Rng {
fn init(&mut self, use_rsl: bool) {
macro_rules! init {
($var:ident) => (
let mut $var = 0x9e3779b97f4a7c13;
let mut $var = Wrapping(0x9e3779b97f4a7c13);
)
}
init!(a); init!(b); init!(c); init!(d);
@ -312,14 +314,14 @@ impl Isaac64Rng {
macro_rules! mix {
() => {{
a-=e; f^=h>>9; h+=a;
b-=f; g^=a<<9; a+=b;
c-=g; h^=b>>23; b+=c;
d-=h; a^=c<<15; c+=d;
e-=a; b^=d>>14; d+=e;
f-=b; c^=e<<20; e+=f;
g-=c; d^=f>>17; f+=g;
h-=d; e^=g<<14; g+=h;
a=a-e; f=f^h>>9; h=h+a;
b=b-f; g=g^a<<9; a=a+b;
c=c-g; h=h^b>>23; b=b+c;
d=d-h; a=a^c<<15; c=c+d;
e=e-a; b=b^d>>14; d=d+e;
f=f-b; c=c^e<<20; e=e+f;
g=g-c; d=d^f>>17; f=f+g;
h=h-d; e=e^g<<14; g=g+h;
}}
}
@ -331,15 +333,15 @@ impl Isaac64Rng {
macro_rules! memloop {
($arr:expr) => {{
for i in (0..RAND_SIZE_64 / 8).map(|i| i * 8) {
a+=$arr[i ]; b+=$arr[i+1];
c+=$arr[i+2]; d+=$arr[i+3];
e+=$arr[i+4]; f+=$arr[i+5];
g+=$arr[i+6]; h+=$arr[i+7];
a=a+Wrapping($arr[i ]); b=b+Wrapping($arr[i+1]);
c=c+Wrapping($arr[i+2]); d=d+Wrapping($arr[i+3]);
e=e+Wrapping($arr[i+4]); f=f+Wrapping($arr[i+5]);
g=g+Wrapping($arr[i+6]); h=h+Wrapping($arr[i+7]);
mix!();
self.mem[i ]=a; self.mem[i+1]=b;
self.mem[i+2]=c; self.mem[i+3]=d;
self.mem[i+4]=e; self.mem[i+5]=f;
self.mem[i+6]=g; self.mem[i+7]=h;
self.mem[i ]=a.0; self.mem[i+1]=b.0;
self.mem[i+2]=c.0; self.mem[i+3]=d.0;
self.mem[i+4]=e.0; self.mem[i+5]=f.0;
self.mem[i+6]=g.0; self.mem[i+7]=h.0;
}
}}
}
@ -349,10 +351,10 @@ impl Isaac64Rng {
} else {
for i in (0..RAND_SIZE_64 / 8).map(|i| i * 8) {
mix!();
self.mem[i ]=a; self.mem[i+1]=b;
self.mem[i+2]=c; self.mem[i+3]=d;
self.mem[i+4]=e; self.mem[i+5]=f;
self.mem[i+6]=g; self.mem[i+7]=h;
self.mem[i ]=a.0; self.mem[i+1]=b.0;
self.mem[i+2]=c.0; self.mem[i+3]=d.0;
self.mem[i+4]=e.0; self.mem[i+5]=f.0;
self.mem[i+6]=g.0; self.mem[i+7]=h.0;
}
}
@ -363,8 +365,8 @@ impl Isaac64Rng {
fn isaac64(&mut self) {
self.c += 1;
// abbreviations
let mut a = self.a;
let mut b = self.b + self.c;
let mut a = Wrapping(self.a);
let mut b = Wrapping(self.b) + Wrapping(self.c);
const MIDPOINT: uint = RAND_SIZE_64 / 2;
const MP_VEC: [(uint, uint); 2] = [(0,MIDPOINT), (MIDPOINT, 0)];
macro_rules! ind {
@ -383,13 +385,13 @@ impl Isaac64Rng {
let mix = if $j == 0 {!mix} else {mix};
unsafe {
let x = *self.mem.get_unchecked(base + mr_offset);
a = mix + *self.mem.get_unchecked(base + m2_offset);
let y = ind!(x) + a + b;
*self.mem.get_unchecked_mut(base + mr_offset) = y;
let x = Wrapping(*self.mem.get_unchecked(base + mr_offset));
a = mix + Wrapping(*self.mem.get_unchecked(base + m2_offset));
let y = Wrapping(ind!(x.0)) + a + b;
*self.mem.get_unchecked_mut(base + mr_offset) = y.0;
b = ind!(y >> RAND_SIZE_64_LEN) + x;
*self.rsl.get_unchecked_mut(base + mr_offset) = b;
b = Wrapping(ind!(y.0 >> RAND_SIZE_64_LEN)) + x;
*self.rsl.get_unchecked_mut(base + mr_offset) = b.0;
}
}}
}
@ -401,13 +403,13 @@ impl Isaac64Rng {
let mix = if $j == 0 {!mix} else {mix};
unsafe {
let x = *self.mem.get_unchecked(base + mr_offset);
a = mix + *self.mem.get_unchecked(base + m2_offset);
let y = ind!(x) + a + b;
*self.mem.get_unchecked_mut(base + mr_offset) = y;
let x = Wrapping(*self.mem.get_unchecked(base + mr_offset));
a = mix + Wrapping(*self.mem.get_unchecked(base + m2_offset));
let y = Wrapping(ind!(x.0)) + a + b;
*self.mem.get_unchecked_mut(base + mr_offset) = y.0;
b = ind!(y >> RAND_SIZE_64_LEN) + x;
*self.rsl.get_unchecked_mut(base + mr_offset) = b;
b = Wrapping(ind!(y.0 >> RAND_SIZE_64_LEN)) + x;
*self.rsl.get_unchecked_mut(base + mr_offset) = b.0;
}
}}
}
@ -419,8 +421,8 @@ impl Isaac64Rng {
}
}
self.a = a;
self.b = b;
self.a = a.0;
self.b = b.0;
self.cnt = RAND_SIZE_64;
}
}


@ -783,7 +783,7 @@ pub fn get_enum_variants<'tcx>(intr: Rc<IdentInterner>, cdata: Cmd, id: ast::Nod
_ => { /* empty */ }
}
let old_disr_val = disr_val;
disr_val += 1;
disr_val = disr_val.wrapping_add(1);
Rc::new(ty::VariantInfo {
args: arg_tys,
arg_names: arg_names,

View File

@ -347,7 +347,7 @@ fn encode_enum_variant_info(ecx: &EncodeContext,
ecx.tcx.map.with_path(variant.node.id, |path| encode_path(rbml_w, path));
rbml_w.end_tag();
disr_val += 1;
disr_val = disr_val.wrapping_add(1);
i += 1;
}
}


@ -204,7 +204,9 @@ impl<'a, 'b, 'tcx> DecodeContext<'a, 'b, 'tcx> {
pub fn tr_id(&self, id: ast::NodeId) -> ast::NodeId {
// from_id_range should be non-empty
assert!(!self.from_id_range.empty());
(id - self.from_id_range.min + self.to_id_range.min)
// Use wrapping arithmetic because otherwise it introduces control flow.
// Maybe we should just have the control flow? -- aatch
(id.wrapping_sub(self.from_id_range.min).wrapping_add(self.to_id_range.min))
}
/// Translates an EXTERNAL def-id, converting the crate number from the one used in the encoded


@ -307,8 +307,9 @@ impl<'a, 'tcx, 'v> Visitor<'v> for CheckCrateVisitor<'a, 'tcx> {
match const_eval::eval_const_expr_partial(self.tcx, ex, None) {
Ok(_) => {}
Err(msg) => {
span_err!(self.tcx.sess, ex.span, E0020,
"{} in a constant expression", msg)
span_err!(self.tcx.sess, msg.span, E0020,
"{} in a constant expression",
msg.description())
}
}
}


@ -13,7 +13,8 @@ use self::Usefulness::*;
use self::WitnessPreference::*;
use middle::const_eval::{compare_const_vals, const_bool, const_float, const_val};
use middle::const_eval::{const_expr_to_pat, eval_const_expr, lookup_const_by_id};
use middle::const_eval::{eval_const_expr, eval_const_expr_partial};
use middle::const_eval::{const_expr_to_pat, lookup_const_by_id};
use middle::def::*;
use middle::expr_use_visitor::{ConsumeMode, Delegate, ExprUseVisitor, Init};
use middle::expr_use_visitor::{JustWrite, LoanCause, MutateMode};
@ -229,13 +230,6 @@ fn check_expr(cx: &mut MatchCheckCtxt, ex: &ast::Expr) {
}
}
fn is_expr_const_nan(tcx: &ty::ctxt, expr: &ast::Expr) -> bool {
match eval_const_expr(tcx, expr) {
const_float(f) => f.is_nan(),
_ => false
}
}
fn check_for_bindings_named_the_same_as_variants(cx: &MatchCheckCtxt, pat: &Pat) {
ast_util::walk_pat(pat, |p| {
match p.node {
@ -269,13 +263,26 @@ fn check_for_bindings_named_the_same_as_variants(cx: &MatchCheckCtxt, pat: &Pat)
// Check that we do not match against a static NaN (#6804)
fn check_for_static_nan(cx: &MatchCheckCtxt, pat: &Pat) {
ast_util::walk_pat(pat, |p| {
match p.node {
ast::PatLit(ref expr) if is_expr_const_nan(cx.tcx, &**expr) => {
span_warn!(cx.tcx.sess, p.span, E0003,
"unmatchable NaN in pattern, \
use the is_nan method in a guard instead");
if let ast::PatLit(ref expr) = p.node {
match eval_const_expr_partial(cx.tcx, &**expr, None) {
Ok(const_float(f)) if f.is_nan() => {
span_warn!(cx.tcx.sess, p.span, E0003,
"unmatchable NaN in pattern, \
use the is_nan method in a guard instead");
}
Ok(_) => {}
Err(err) => {
let subspan = p.span.lo <= err.span.lo && err.span.hi <= p.span.hi;
cx.tcx.sess.span_err(err.span,
&format!("constant evaluation error: {}",
err.description().as_slice()));
if !subspan {
cx.tcx.sess.span_note(p.span,
"in pattern here")
}
}
}
_ => ()
}
true
});


@ -25,6 +25,8 @@ use syntax::parse::token::InternedString;
use syntax::ptr::P;
use syntax::{ast_map, ast_util, codemap};
use std::borrow::{Cow, IntoCow};
use std::num::wrapping::OverflowingOps;
use std::cmp::Ordering;
use std::collections::hash_map::Entry::Vacant;
use std::{i8, i16, i32, i64};
@ -202,35 +204,153 @@ pub fn const_expr_to_pat(tcx: &ty::ctxt, expr: &Expr, span: Span) -> P<ast::Pat>
pub fn eval_const_expr(tcx: &ty::ctxt, e: &Expr) -> const_val {
match eval_const_expr_partial(tcx, e, None) {
Ok(r) => r,
Err(s) => tcx.sess.span_fatal(e.span, &s[..])
Err(s) => tcx.sess.span_fatal(s.span, s.description().as_slice())
}
}
#[derive(Clone)]
pub struct ConstEvalErr {
pub span: Span,
pub kind: ErrKind,
}
#[derive(Clone)]
pub enum ErrKind {
CannotCast,
CannotCastTo(&'static str),
InvalidOpForBools(ast::BinOp_),
InvalidOpForFloats(ast::BinOp_),
InvalidOpForIntUint(ast::BinOp_),
InvalidOpForUintInt(ast::BinOp_),
NegateOnString,
NegateOnBoolean,
NegateOnBinary,
NotOnFloat,
NotOnString,
NotOnBinary,
AddiWithOverflow(i64, i64),
SubiWithOverflow(i64, i64),
MuliWithOverflow(i64, i64),
AdduWithOverflow(u64, u64),
SubuWithOverflow(u64, u64),
MuluWithOverflow(u64, u64),
DivideByZero,
DivideWithOverflow,
ModuloByZero,
ModuloWithOverflow,
MissingStructField,
NonConstPath,
NonConstStruct,
TupleIndexOutOfBounds,
MiscBinaryOp,
MiscCatchAll,
}
impl ConstEvalErr {
pub fn description(&self) -> Cow<str> {
use self::ErrKind::*;
match self.kind {
CannotCast => "can't cast this type".into_cow(),
CannotCastTo(s) => format!("can't cast this type to {}", s).into_cow(),
InvalidOpForBools(_) => "can't do this op on bools".into_cow(),
InvalidOpForFloats(_) => "can't do this op on floats".into_cow(),
InvalidOpForIntUint(..) => "can't do this op on an int and uint".into_cow(),
InvalidOpForUintInt(..) => "can't do this op on a uint and int".into_cow(),
NegateOnString => "negate on string".into_cow(),
NegateOnBoolean => "negate on boolean".into_cow(),
NegateOnBinary => "negate on binary literal".into_cow(),
NotOnFloat => "not on float or string".into_cow(),
NotOnString => "not on float or string".into_cow(),
NotOnBinary => "not on binary literal".into_cow(),
AddiWithOverflow(..) => "attempted to add with overflow".into_cow(),
SubiWithOverflow(..) => "attempted to sub with overflow".into_cow(),
MuliWithOverflow(..) => "attempted to mul with overflow".into_cow(),
AdduWithOverflow(..) => "attempted to add with overflow".into_cow(),
SubuWithOverflow(..) => "attempted to sub with overflow".into_cow(),
MuluWithOverflow(..) => "attempted to mul with overflow".into_cow(),
DivideByZero => "attempted to divide by zero".into_cow(),
DivideWithOverflow => "attempted to divide with overflow".into_cow(),
ModuloByZero => "attempted remainder with a divisor of zero".into_cow(),
ModuloWithOverflow => "attempted remainder with overflow".into_cow(),
MissingStructField => "nonexistent struct field".into_cow(),
NonConstPath => "non-constant path in constant expr".into_cow(),
NonConstStruct => "non-constant struct in constant expr".into_cow(),
TupleIndexOutOfBounds => "tuple index out of bounds".into_cow(),
MiscBinaryOp => "bad operands for binary".into_cow(),
MiscCatchAll => "unsupported constant expr".into_cow(),
}
}
}
macro_rules! signal {
($e:expr, $ctor:ident) => {
return Err(ConstEvalErr { span: $e.span, kind: ErrKind::$ctor })
};
($e:expr, $ctor:ident($($arg:expr),*)) => {
return Err(ConstEvalErr { span: $e.span, kind: ErrKind::$ctor($($arg),*) })
}
}
fn checked_add_int(e: &Expr, a: i64, b: i64) -> Result<const_val, ConstEvalErr> {
let (ret, oflo) = a.overflowing_add(b);
if !oflo { Ok(const_int(ret)) } else { signal!(e, AddiWithOverflow(a, b)) }
}
fn checked_sub_int(e: &Expr, a: i64, b: i64) -> Result<const_val, ConstEvalErr> {
let (ret, oflo) = a.overflowing_sub(b);
if !oflo { Ok(const_int(ret)) } else { signal!(e, SubiWithOverflow(a, b)) }
}
fn checked_mul_int(e: &Expr, a: i64, b: i64) -> Result<const_val, ConstEvalErr> {
let (ret, oflo) = a.overflowing_mul(b);
if !oflo { Ok(const_int(ret)) } else { signal!(e, MuliWithOverflow(a, b)) }
}
fn checked_add_uint(e: &Expr, a: u64, b: u64) -> Result<const_val, ConstEvalErr> {
let (ret, oflo) = a.overflowing_add(b);
if !oflo { Ok(const_uint(ret)) } else { signal!(e, AdduWithOverflow(a, b)) }
}
fn checked_sub_uint(e: &Expr, a: u64, b: u64) -> Result<const_val, ConstEvalErr> {
let (ret, oflo) = a.overflowing_sub(b);
if !oflo { Ok(const_uint(ret)) } else { signal!(e, SubuWithOverflow(a, b)) }
}
fn checked_mul_uint(e: &Expr, a: u64, b: u64) -> Result<const_val, ConstEvalErr> {
let (ret, oflo) = a.overflowing_mul(b);
if !oflo { Ok(const_uint(ret)) } else { signal!(e, MuluWithOverflow(a, b)) }
}
pub fn eval_const_expr_partial<'tcx>(tcx: &ty::ctxt<'tcx>,
e: &Expr,
ty_hint: Option<Ty<'tcx>>)
-> Result<const_val, String> {
fn fromb(b: bool) -> Result<const_val, String> { Ok(const_int(b as i64)) }
-> Result<const_val, ConstEvalErr> {
fn fromb(b: bool) -> const_val { const_int(b as i64) }
let ety = ty_hint.or_else(|| ty::expr_ty_opt(tcx, e));
match e.node {
let result = match e.node {
ast::ExprUnary(ast::UnNeg, ref inner) => {
match eval_const_expr_partial(tcx, &**inner, ety) {
Ok(const_float(f)) => Ok(const_float(-f)),
Ok(const_int(i)) => Ok(const_int(-i)),
Ok(const_uint(i)) => Ok(const_uint(-i)),
Ok(const_str(_)) => Err("negate on string".to_string()),
Ok(const_bool(_)) => Err("negate on boolean".to_string()),
ref err => ((*err).clone())
match try!(eval_const_expr_partial(tcx, &**inner, ety)) {
const_float(f) => const_float(-f),
const_int(i) => const_int(-i),
const_uint(i) => const_uint(-i),
const_str(_) => signal!(e, NegateOnString),
const_bool(_) => signal!(e, NegateOnBoolean),
const_binary(_) => signal!(e, NegateOnBinary),
}
}
ast::ExprUnary(ast::UnNot, ref inner) => {
match eval_const_expr_partial(tcx, &**inner, ety) {
Ok(const_int(i)) => Ok(const_int(!i)),
Ok(const_uint(i)) => Ok(const_uint(!i)),
Ok(const_bool(b)) => Ok(const_bool(!b)),
_ => Err("not on float or string".to_string())
match try!(eval_const_expr_partial(tcx, &**inner, ety)) {
const_int(i) => const_int(!i),
const_uint(i) => const_uint(!i),
const_bool(b) => const_bool(!b),
const_str(_) => signal!(e, NotOnString),
const_float(_) => signal!(e, NotOnFloat),
const_binary(_) => signal!(e, NotOnBinary),
}
}
ast::ExprBinary(op, ref a, ref b) => {
@ -238,25 +358,25 @@ pub fn eval_const_expr_partial<'tcx>(tcx: &ty::ctxt<'tcx>,
ast::BiShl | ast::BiShr => Some(tcx.types.uint),
_ => ety
};
match (eval_const_expr_partial(tcx, &**a, ety),
eval_const_expr_partial(tcx, &**b, b_ty)) {
(Ok(const_float(a)), Ok(const_float(b))) => {
match (try!(eval_const_expr_partial(tcx, &**a, ety)),
try!(eval_const_expr_partial(tcx, &**b, b_ty))) {
(const_float(a), const_float(b)) => {
match op.node {
ast::BiAdd => Ok(const_float(a + b)),
ast::BiSub => Ok(const_float(a - b)),
ast::BiMul => Ok(const_float(a * b)),
ast::BiDiv => Ok(const_float(a / b)),
ast::BiRem => Ok(const_float(a % b)),
ast::BiAdd => const_float(a + b),
ast::BiSub => const_float(a - b),
ast::BiMul => const_float(a * b),
ast::BiDiv => const_float(a / b),
ast::BiRem => const_float(a % b),
ast::BiEq => fromb(a == b),
ast::BiLt => fromb(a < b),
ast::BiLe => fromb(a <= b),
ast::BiNe => fromb(a != b),
ast::BiGe => fromb(a >= b),
ast::BiGt => fromb(a > b),
_ => Err("can't do this op on floats".to_string())
_ => signal!(e, InvalidOpForFloats(op.node))
}
}
(Ok(const_int(a)), Ok(const_int(b))) => {
(const_int(a), const_int(b)) => {
let is_a_min_value = || {
let int_ty = match ty::expr_ty_opt(tcx, e).map(|ty| &ty.sty) {
Some(&ty::ty_int(int_ty)) => int_ty,
@ -276,32 +396,32 @@ pub fn eval_const_expr_partial<'tcx>(tcx: &ty::ctxt<'tcx>,
}
};
match op.node {
ast::BiAdd => Ok(const_int(a + b)),
ast::BiSub => Ok(const_int(a - b)),
ast::BiMul => Ok(const_int(a * b)),
ast::BiAdd => try!(checked_add_int(e, a, b)),
ast::BiSub => try!(checked_sub_int(e, a, b)),
ast::BiMul => try!(checked_mul_int(e, a, b)),
ast::BiDiv => {
if b == 0 {
Err("attempted to divide by zero".to_string())
signal!(e, DivideByZero);
} else if b == -1 && is_a_min_value() {
Err("attempted to divide with overflow".to_string())
signal!(e, DivideWithOverflow);
} else {
Ok(const_int(a / b))
const_int(a / b)
}
}
ast::BiRem => {
if b == 0 {
Err("attempted remainder with a divisor of zero".to_string())
signal!(e, ModuloByZero)
} else if b == -1 && is_a_min_value() {
Err("attempted remainder with overflow".to_string())
signal!(e, ModuloWithOverflow)
} else {
Ok(const_int(a % b))
const_int(a % b)
}
}
ast::BiAnd | ast::BiBitAnd => Ok(const_int(a & b)),
ast::BiOr | ast::BiBitOr => Ok(const_int(a | b)),
ast::BiBitXor => Ok(const_int(a ^ b)),
ast::BiShl => Ok(const_int(a << b as uint)),
ast::BiShr => Ok(const_int(a >> b as uint)),
ast::BiAnd | ast::BiBitAnd => const_int(a & b),
ast::BiOr | ast::BiBitOr => const_int(a | b),
ast::BiBitXor => const_int(a ^ b),
ast::BiShl => const_int(a << b as uint),
ast::BiShr => const_int(a >> b as uint),
ast::BiEq => fromb(a == b),
ast::BiLt => fromb(a < b),
ast::BiLe => fromb(a <= b),
@ -310,25 +430,20 @@ pub fn eval_const_expr_partial<'tcx>(tcx: &ty::ctxt<'tcx>,
ast::BiGt => fromb(a > b)
}
}
(Ok(const_uint(a)), Ok(const_uint(b))) => {
(const_uint(a), const_uint(b)) => {
match op.node {
ast::BiAdd => Ok(const_uint(a + b)),
ast::BiSub => Ok(const_uint(a - b)),
ast::BiMul => Ok(const_uint(a * b)),
ast::BiDiv if b == 0 => {
Err("attempted to divide by zero".to_string())
}
ast::BiDiv => Ok(const_uint(a / b)),
ast::BiRem if b == 0 => {
Err("attempted remainder with a divisor of \
zero".to_string())
}
ast::BiRem => Ok(const_uint(a % b)),
ast::BiAnd | ast::BiBitAnd => Ok(const_uint(a & b)),
ast::BiOr | ast::BiBitOr => Ok(const_uint(a | b)),
ast::BiBitXor => Ok(const_uint(a ^ b)),
ast::BiShl => Ok(const_uint(a << b as uint)),
ast::BiShr => Ok(const_uint(a >> b as uint)),
ast::BiAdd => try!(checked_add_uint(e, a, b)),
ast::BiSub => try!(checked_sub_uint(e, a, b)),
ast::BiMul => try!(checked_mul_uint(e, a, b)),
ast::BiDiv if b == 0 => signal!(e, DivideByZero),
ast::BiDiv => const_uint(a / b),
ast::BiRem if b == 0 => signal!(e, ModuloByZero),
ast::BiRem => const_uint(a % b),
ast::BiAnd | ast::BiBitAnd => const_uint(a & b),
ast::BiOr | ast::BiBitOr => const_uint(a | b),
ast::BiBitXor => const_uint(a ^ b),
ast::BiShl => const_uint(a << b as uint),
ast::BiShr => const_uint(a >> b as uint),
ast::BiEq => fromb(a == b),
ast::BiLt => fromb(a < b),
ast::BiLe => fromb(a <= b),
@ -338,22 +453,22 @@ pub fn eval_const_expr_partial<'tcx>(tcx: &ty::ctxt<'tcx>,
}
}
// shifts can have any integral type as their rhs
(Ok(const_int(a)), Ok(const_uint(b))) => {
(const_int(a), const_uint(b)) => {
match op.node {
ast::BiShl => Ok(const_int(a << b as uint)),
ast::BiShr => Ok(const_int(a >> b as uint)),
_ => Err("can't do this op on an int and uint".to_string())
ast::BiShl => const_int(a << b as uint),
ast::BiShr => const_int(a >> b as uint),
_ => signal!(e, InvalidOpForIntUint(op.node)),
}
}
(Ok(const_uint(a)), Ok(const_int(b))) => {
(const_uint(a), const_int(b)) => {
match op.node {
ast::BiShl => Ok(const_uint(a << b as uint)),
ast::BiShr => Ok(const_uint(a >> b as uint)),
_ => Err("can't do this op on a uint and int".to_string())
ast::BiShl => const_uint(a << b as uint),
ast::BiShr => const_uint(a >> b as uint),
_ => signal!(e, InvalidOpForUintInt(op.node)),
}
}
(Ok(const_bool(a)), Ok(const_bool(b))) => {
Ok(const_bool(match op.node {
(const_bool(a), const_bool(b)) => {
const_bool(match op.node {
ast::BiAnd => a && b,
ast::BiOr => a || b,
ast::BiBitXor => a ^ b,
@ -361,10 +476,11 @@ pub fn eval_const_expr_partial<'tcx>(tcx: &ty::ctxt<'tcx>,
ast::BiBitOr => a | b,
ast::BiEq => a == b,
ast::BiNe => a != b,
_ => return Err("can't do this op on bools".to_string())
}))
_ => signal!(e, InvalidOpForBools(op.node)),
})
}
_ => Err("bad operands for binary".to_string())
_ => signal!(e, MiscBinaryOp),
}
}
ast::ExprCast(ref base, ref target_ty) => {
@ -379,7 +495,10 @@ pub fn eval_const_expr_partial<'tcx>(tcx: &ty::ctxt<'tcx>,
// Prefer known type to noop, but always have a type hint.
let base_hint = ty::expr_ty_opt(tcx, &**base).unwrap_or(ety);
let val = try!(eval_const_expr_partial(tcx, &**base, Some(base_hint)));
cast_const(val, ety)
match cast_const(val, ety) {
Ok(val) => val,
Err(kind) => return Err(ConstEvalErr { span: e.span, kind: kind }),
}
}
ast::ExprPath(..) => {
let opt_def = tcx.def_map.borrow().get(&e.id).map(|d| d.full_def());
@ -406,19 +525,19 @@ pub fn eval_const_expr_partial<'tcx>(tcx: &ty::ctxt<'tcx>,
};
let const_expr = match const_expr {
Some(actual_e) => actual_e,
None => return Err("non-constant path in constant expr".to_string())
None => signal!(e, NonConstPath)
};
let ety = ety.or_else(|| const_ty.and_then(|ty| ast_ty_to_prim_ty(tcx, ty)));
eval_const_expr_partial(tcx, const_expr, ety)
try!(eval_const_expr_partial(tcx, const_expr, ety))
}
ast::ExprLit(ref lit) => {
Ok(lit_to_const(&**lit, ety))
lit_to_const(&**lit, ety)
}
ast::ExprParen(ref e) => eval_const_expr_partial(tcx, &**e, ety),
ast::ExprParen(ref e) => try!(eval_const_expr_partial(tcx, &**e, ety)),
ast::ExprBlock(ref block) => {
match block.expr {
Some(ref expr) => eval_const_expr_partial(tcx, &**expr, ety),
None => Ok(const_int(0i64))
Some(ref expr) => try!(eval_const_expr_partial(tcx, &**expr, ety)),
None => const_int(0i64)
}
}
ast::ExprTupField(ref base, index) => {
@ -426,13 +545,13 @@ pub fn eval_const_expr_partial<'tcx>(tcx: &ty::ctxt<'tcx>,
if let Some(&ast::ExprTup(ref fields)) = lookup_const(tcx, &**base).map(|s| &s.node) {
// Check that the given index is within bounds and evaluate its value
if fields.len() > index.node {
return eval_const_expr_partial(tcx, &*fields[index.node], None)
return eval_const_expr_partial(tcx, &*fields[index.node], None);
} else {
return Err("tuple index out of bounds".to_string())
signal!(e, TupleIndexOutOfBounds);
}
}
Err("non-constant struct in constant expr".to_string())
signal!(e, NonConstStruct);
}
ast::ExprField(ref base, field_name) => {
// Get the base expression if it is a struct and it is constant
@ -441,19 +560,21 @@ pub fn eval_const_expr_partial<'tcx>(tcx: &ty::ctxt<'tcx>,
// Check that the given field exists and evaluate it
if let Some(f) = fields.iter().find(|f|
f.ident.node.as_str() == field_name.node.as_str()) {
return eval_const_expr_partial(tcx, &*f.expr, None)
return eval_const_expr_partial(tcx, &*f.expr, None);
} else {
return Err("nonexistent struct field".to_string())
signal!(e, MissingStructField);
}
}
Err("non-constant struct in constant expr".to_string())
signal!(e, NonConstStruct);
}
_ => Err("unsupported constant expr".to_string())
}
_ => signal!(e, MiscCatchAll)
};
Ok(result)
}
fn cast_const(val: const_val, ty: Ty) -> Result<const_val, String> {
fn cast_const(val: const_val, ty: Ty) -> Result<const_val, ErrKind> {
macro_rules! define_casts {
($($ty_pat:pat => (
$intermediate_ty:ty,
@ -466,11 +587,10 @@ fn cast_const(val: const_val, ty: Ty) -> Result<const_val, String> {
const_uint(u) => Ok($const_type(u as $intermediate_ty as $target_ty)),
const_int(i) => Ok($const_type(i as $intermediate_ty as $target_ty)),
const_float(f) => Ok($const_type(f as $intermediate_ty as $target_ty)),
_ => Err(concat!("can't cast this type to ",
stringify!($const_type)).to_string())
_ => Err(ErrKind::CannotCastTo(stringify!($const_type))),
}
},)*
_ => Err("can't cast this type".to_string())
_ => Err(ErrKind::CannotCast),
})
}
@ -544,15 +664,15 @@ pub fn compare_lit_exprs<'tcx>(tcx: &ty::ctxt<'tcx>,
-> Option<Ordering> {
let a = match eval_const_expr_partial(tcx, a, ty_hint) {
Ok(a) => a,
Err(s) => {
tcx.sess.span_err(a.span, &s[..]);
Err(e) => {
tcx.sess.span_err(a.span, e.description().as_slice());
return None;
}
};
let b = match eval_const_expr_partial(tcx, b, ty_hint) {
Ok(b) => b,
Err(s) => {
tcx.sess.span_err(b.span, &s[..]);
Err(e) => {
tcx.sess.span_err(b.span, e.description().as_slice());
return None;
}
};
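
The error plumbing above follows one pattern throughout: every failure site builds a `ConstEvalErr` carrying the offending expression's span plus an `ErrKind`, and the `signal!` macro returns it from the enclosing function in one step. A stripped-down, self-contained sketch of that pattern (editor's illustration; the `Span` type here is hypothetical, standing in for `codemap::Span`):

```rust
#[derive(Debug)]
struct Span { lo: u32, hi: u32 }

#[derive(Debug)]
enum ErrKind { DivideByZero } // more kinds in the real enum above

#[derive(Debug)]
struct ConstEvalErr { span: Span, kind: ErrKind }

// Early-return with a structured, span-carrying error, like `signal!` above.
macro_rules! signal {
    ($span:expr, $ctor:ident) => {
        return Err(ConstEvalErr { span: $span, kind: ErrKind::$ctor })
    };
}

fn eval_div(span: Span, a: i64, b: i64) -> Result<i64, ConstEvalErr> {
    if b == 0 {
        signal!(span, DivideByZero);
    }
    Ok(a / b)
}

fn main() {
    let err = eval_div(Span { lo: 10, hi: 14 }, 1, 0).unwrap_err();
    println!("{:?} at bytes {}..{}", err.kind, err.span.lo, err.span.hi);
}
```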


@ -5333,6 +5333,7 @@ pub fn type_is_empty(cx: &ctxt, ty: Ty) -> bool {
pub fn enum_variants<'tcx>(cx: &ctxt<'tcx>, id: ast::DefId)
-> Rc<Vec<Rc<VariantInfo<'tcx>>>> {
use std::num::Int; // For checked_add
memoized(&cx.enum_var_cache, id, |id: ast::DefId| {
if ast::LOCAL_CRATE != id.krate {
Rc::new(csearch::get_enum_variants(cx, id))
@ -5349,11 +5350,7 @@ pub fn enum_variants<'tcx>(cx: &ctxt<'tcx>, id: ast::DefId)
let mut last_discriminant: Option<Disr> = None;
Rc::new(enum_definition.variants.iter().map(|variant| {
let mut discriminant = match last_discriminant {
Some(val) => val + 1,
None => INITIAL_DISCRIMINANT_VALUE
};
let mut discriminant = INITIAL_DISCRIMINANT_VALUE;
if let Some(ref e) = variant.node.disr_expr {
// Preserve all values, and prefer signed.
let ty = Some(cx.types.i64);
@ -5369,11 +5366,24 @@ pub fn enum_variants<'tcx>(cx: &ctxt<'tcx>, id: ast::DefId)
"expected signed integer constant");
}
Err(err) => {
span_err!(cx.sess, e.span, E0305,
"expected constant: {}", err);
span_err!(cx.sess, err.span, E0305,
"constant evaluation error: {}",
err.description().as_slice());
}
}
};
} else {
if let Some(val) = last_discriminant {
if let Some(v) = val.checked_add(1) {
discriminant = v
} else {
cx.sess.span_err(
variant.span,
&format!("Discriminant overflowed!"));
}
} else {
discriminant = INITIAL_DISCRIMINANT_VALUE;
}
}
last_discriminant = Some(discriminant);
Rc::new(VariantInfo::from_ast_variant(cx, &**variant,


@ -259,7 +259,6 @@ pub enum CrateType {
CrateTypeStaticlib,
}
#[derive(Clone)]
pub enum Passes {
SomePasses(Vec<String>),
@ -349,7 +348,8 @@ macro_rules! options {
#[allow(non_upper_case_globals, dead_code)]
mod $mod_desc {
pub const parse_bool: Option<&'static str> = None;
pub const parse_opt_bool: Option<&'static str> = None;
pub const parse_opt_bool: Option<&'static str> =
Some("one of: `y`, `yes`, `on`, `n`, `no`, or `off`");
pub const parse_string: Option<&'static str> = Some("a string");
pub const parse_opt_string: Option<&'static str> = Some("a string");
pub const parse_list: Option<&'static str> = Some("a space-separated list of strings");
@ -380,7 +380,19 @@ macro_rules! options {
fn parse_opt_bool(slot: &mut Option<bool>, v: Option<&str>) -> bool {
match v {
Some(..) => false,
Some(s) => {
match s {
"n" | "no" | "off" => {
*slot = Some(false);
}
"y" | "yes" | "on" => {
*slot = Some(true);
}
_ => { return false; }
}
true
},
None => { *slot = Some(true); true }
}
}
@ -585,6 +597,8 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options,
"Adds unstable command line options to rustc interface"),
print_enum_sizes: bool = (false, parse_bool,
"Print the size of enums and their variants"),
force_overflow_checks: Option<bool> = (None, parse_opt_bool,
"Force overflow checks on or off"),
}
pub fn default_lib_output() -> CrateType {


@ -57,7 +57,7 @@ impl Hasher for FnvHasher {
let FnvHasher(mut hash) = *self;
for byte in bytes {
hash = hash ^ (*byte as u64);
hash = hash * 0x100000001b3;
hash = hash.wrapping_mul(0x100000001b3);
}
*self = FnvHasher(hash);
}


@ -347,17 +347,19 @@ impl Engine256State {
// Sha-512 and Sha-256 use basically the same calculations which are implemented
// by these macros. Inlining the calculations seems to result in better generated code.
macro_rules! schedule_round { ($t:expr) => (
w[$t] = sigma1(w[$t - 2]) + w[$t - 7] + sigma0(w[$t - 15]) + w[$t - 16];
)
w[$t] = sigma1(w[$t - 2]).wrapping_add(w[$t - 7])
.wrapping_add(sigma0(w[$t - 15])).wrapping_add(w[$t - 16]);
)
}
macro_rules! sha2_round {
($A:ident, $B:ident, $C:ident, $D:ident,
$E:ident, $F:ident, $G:ident, $H:ident, $K:ident, $t:expr) => (
{
$H += sum1($E) + ch($E, $F, $G) + $K[$t] + w[$t];
$D += $H;
$H += sum0($A) + maj($A, $B, $C);
$H = $H.wrapping_add(sum1($E)).wrapping_add(ch($E, $F, $G))
.wrapping_add($K[$t]).wrapping_add(w[$t]);
$D = $D.wrapping_add($H);
$H = $H.wrapping_add(sum0($A)).wrapping_add(maj($A, $B, $C));
}
)
}
@ -397,14 +399,14 @@ impl Engine256State {
sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7);
}
self.h0 += a;
self.h1 += b;
self.h2 += c;
self.h3 += d;
self.h4 += e;
self.h5 += f;
self.h6 += g;
self.h7 += h;
self.h0 = self.h0.wrapping_add(a);
self.h1 = self.h1.wrapping_add(b);
self.h2 = self.h2.wrapping_add(c);
self.h3 = self.h3.wrapping_add(d);
self.h4 = self.h4.wrapping_add(e);
self.h5 = self.h5.wrapping_add(f);
self.h6 = self.h6.wrapping_add(g);
self.h7 = self.h7.wrapping_add(h);
}
}
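Every SHA-2 state update is an addition modulo the word size, so each `+=` becomes `wrapping_add`. The pattern in miniature on `u32` (not the real compression function):

    fn main() {
        let h0: u32 = 0xffff_fff0; // running state word
        let a: u32 = 0x20;         // round output to fold in
        // Well-defined mod 2^32; `h0 + a` would panic under debug overflow checks.
        assert_eq!(h0.wrapping_add(a), 0x10);
    }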

View File

@ -778,7 +778,9 @@ fn load_discr(bcx: Block, ity: IntType, ptr: ValueRef, min: Disr, max: Disr)
assert!(bits <= 64);
let bits = bits as uint;
let mask = (-1u64 >> (64 - bits)) as Disr;
if (max + 1) & mask == min & mask {
// For a (max) discr of -1, max will be `-1 as usize`, which overflows.
// However, that is fine here (it would still represent the full range),
if (max.wrapping_add(1)) & mask == min & mask {
// i.e., if the range is everything. The lo==hi case would be
// rejected by the LLVM verifier (it would mean either an
// empty set, which is impossible, or the entire range of the
@ -787,7 +789,7 @@ fn load_discr(bcx: Block, ity: IntType, ptr: ValueRef, min: Disr, max: Disr)
} else {
// llvm::ConstantRange can deal with ranges that wrap around,
// so an overflow on (max + 1) is fine.
LoadRangeAssert(bcx, ptr, min, (max+1), /* signed: */ True)
LoadRangeAssert(bcx, ptr, min, (max.wrapping_add(1)), /* signed: */ True)
}
}
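The `(max + 1) & mask == min & mask` test relies on modular arithmetic on purpose: when the discriminant range covers every representable value, incrementing `max` wraps back to `min`, which is exactly the case being detected. A sketch of that comparison for a hypothetical 8-bit discriminant (not the compiler's real `Disr` handling):

    fn main() {
        let bits = 8u32;
        let mask: u64 = u64::MAX >> (64 - bits);
        let (min, max) = (0u64, 0xffu64); // the full 8-bit range
        // max + 1 wraps to min under the mask, i.e. "the range is everything".
        assert_eq!(max.wrapping_add(1) & mask, min & mask);
    }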

View File

@ -3086,6 +3086,12 @@ pub fn trans_crate<'tcx>(analysis: ty::CrateAnalysis<'tcx>)
let ty::CrateAnalysis { ty_cx: tcx, export_map, reachable, name, .. } = analysis;
let krate = tcx.map.krate();
let check_overflow = if let Some(v) = tcx.sess.opts.debugging_opts.force_overflow_checks {
v
} else {
!attr::contains_name(&krate.config, "ndebug")
};
// Before we touch LLVM, make sure that multithreading is enabled.
unsafe {
use std::sync::{Once, ONCE_INIT};
@ -3113,7 +3119,8 @@ pub fn trans_crate<'tcx>(analysis: ty::CrateAnalysis<'tcx>)
export_map,
Sha256::new(),
link_meta.clone(),
reachable);
reachable,
check_overflow);
{
let ccx = shared_ccx.get_ccx(0);
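The rule encoded above is: an explicit `-Z force-overflow-checks` wins, otherwise checks track the debug/`ndebug` configuration. That is `Option::unwrap_or` over the flag; a sketch of the same decision with placeholder names (not the compiler's actual fields):

    fn should_check_overflow(force_flag: Option<bool>, ndebug_cfg: bool) -> bool {
        force_flag.unwrap_or(!ndebug_cfg)
    }

    fn main() {
        assert!(should_check_overflow(None, false));      // debug build: checks on
        assert!(!should_check_overflow(None, true));      // ndebug set: checks off
        assert!(should_check_overflow(Some(true), true)); // the flag overrides ndebug
    }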

View File

@ -462,9 +462,9 @@ fn const_expr_unadjusted<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
ast::ExprIndex(ref base, ref index) => {
let (bv, bt) = const_expr(cx, &**base, param_substs);
let iv = match const_eval::eval_const_expr(cx.tcx(), &**index) {
const_eval::const_int(i) => i as u64,
const_eval::const_uint(u) => u,
let iv = match const_eval::eval_const_expr_partial(cx.tcx(), &**index, None) {
Ok(const_eval::const_int(i)) => i as u64,
Ok(const_eval::const_uint(u)) => u,
_ => cx.sess().span_bug(index.span,
"index is not an integer-constant expression")
};
@ -650,9 +650,9 @@ fn const_expr_unadjusted<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
ast::ExprRepeat(ref elem, ref count) => {
let unit_ty = ty::sequence_element_type(cx.tcx(), ety);
let llunitty = type_of::type_of(cx, unit_ty);
let n = match const_eval::eval_const_expr(cx.tcx(), &**count) {
const_eval::const_int(i) => i as uint,
const_eval::const_uint(i) => i as uint,
let n = match const_eval::eval_const_expr_partial(cx.tcx(), &**count, None) {
Ok(const_eval::const_int(i)) => i as uint,
Ok(const_eval::const_uint(i)) => i as uint,
_ => cx.sess().span_bug(count.span, "count must be integral const expression.")
};
let unit_val = const_expr(cx, &**elem, param_substs).0;

View File

@ -69,6 +69,7 @@ pub struct SharedCrateContext<'tcx> {
symbol_hasher: RefCell<Sha256>,
tcx: ty::ctxt<'tcx>,
stats: Stats,
check_overflow: bool,
available_monomorphizations: RefCell<FnvHashSet<String>>,
available_drop_glues: RefCell<FnvHashMap<Ty<'tcx>, String>>,
@ -245,7 +246,8 @@ impl<'tcx> SharedCrateContext<'tcx> {
export_map: ExportMap,
symbol_hasher: Sha256,
link_meta: LinkMeta,
reachable: NodeSet)
reachable: NodeSet,
check_overflow: bool)
-> SharedCrateContext<'tcx> {
let (metadata_llcx, metadata_llmod) = unsafe {
create_context_and_module(&tcx.sess, "metadata")
@ -274,6 +276,7 @@ impl<'tcx> SharedCrateContext<'tcx> {
llvm_insns: RefCell::new(FnvHashMap()),
fn_stats: RefCell::new(Vec::new()),
},
check_overflow: check_overflow,
available_monomorphizations: RefCell::new(FnvHashSet()),
available_drop_glues: RefCell::new(FnvHashMap()),
};
@ -743,6 +746,10 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> {
&format!("the type `{}` is too big for the current architecture",
obj.repr(self.tcx())))
}
pub fn check_overflow(&self) -> bool {
self.shared.check_overflow
}
}
fn declare_intrinsic(ccx: &CrateContext, key: & &'static str) -> Option<ValueRef> {

View File

@ -82,6 +82,7 @@ use trans::machine::{llsize_of, llsize_of_alloc};
use trans::type_::Type;
use syntax::{ast, ast_util, codemap};
use syntax::parse::token::InternedString;
use syntax::ptr::P;
use syntax::parse::token;
use std::iter::repeat;
@ -1709,8 +1710,8 @@ fn trans_eager_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
};
let is_float = ty::type_is_fp(intype);
let is_signed = ty::type_is_signed(intype);
let rhs = base::cast_shift_expr_rhs(bcx, op, lhs, rhs);
let info = expr_info(binop_expr);
let binop_debug_loc = binop_expr.debug_loc();
@ -1720,21 +1721,30 @@ fn trans_eager_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
if is_float {
FAdd(bcx, lhs, rhs, binop_debug_loc)
} else {
Add(bcx, lhs, rhs, binop_debug_loc)
let (newbcx, res) = with_overflow_check(
bcx, OverflowOp::Add, info, lhs_t, lhs, rhs, binop_debug_loc);
bcx = newbcx;
res
}
}
ast::BiSub => {
if is_float {
FSub(bcx, lhs, rhs, binop_debug_loc)
} else {
Sub(bcx, lhs, rhs, binop_debug_loc)
let (newbcx, res) = with_overflow_check(
bcx, OverflowOp::Sub, info, lhs_t, lhs, rhs, binop_debug_loc);
bcx = newbcx;
res
}
}
ast::BiMul => {
if is_float {
FMul(bcx, lhs, rhs, binop_debug_loc)
} else {
Mul(bcx, lhs, rhs, binop_debug_loc)
let (newbcx, res) = with_overflow_check(
bcx, OverflowOp::Mul, info, lhs_t, lhs, rhs, binop_debug_loc);
bcx = newbcx;
res
}
}
ast::BiDiv => {
@ -2314,3 +2324,110 @@ fn deref_once<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
DatumBlock { bcx: bcx, datum: datum }
}
}
enum OverflowOp {
Add,
Sub,
Mul,
}
impl OverflowOp {
fn to_intrinsic_name(&self, tcx: &ty::ctxt, ty: Ty) -> &'static str {
use syntax::ast::IntTy::*;
use syntax::ast::UintTy::*;
use middle::ty::{ty_int, ty_uint};
let new_sty = match ty.sty {
ty_int(TyIs(_)) => match &tcx.sess.target.target.target_pointer_width[..] {
"32" => ty_int(TyI32),
"64" => ty_int(TyI64),
_ => panic!("unsupported target word size")
},
ty_uint(TyUs(_)) => match &tcx.sess.target.target.target_pointer_width[..] {
"32" => ty_uint(TyU32),
"64" => ty_uint(TyU64),
_ => panic!("unsupported target word size")
},
ref t @ ty_uint(_) | ref t @ ty_int(_) => t.clone(),
_ => panic!("tried to get overflow intrinsic for non-int type")
};
match *self {
OverflowOp::Add => match new_sty {
ty_int(TyI8) => "llvm.sadd.with.overflow.i8",
ty_int(TyI16) => "llvm.sadd.with.overflow.i16",
ty_int(TyI32) => "llvm.sadd.with.overflow.i32",
ty_int(TyI64) => "llvm.sadd.with.overflow.i64",
ty_uint(TyU8) => "llvm.uadd.with.overflow.i8",
ty_uint(TyU16) => "llvm.uadd.with.overflow.i16",
ty_uint(TyU32) => "llvm.uadd.with.overflow.i32",
ty_uint(TyU64) => "llvm.uadd.with.overflow.i64",
_ => unreachable!(),
},
OverflowOp::Sub => match new_sty {
ty_int(TyI8) => "llvm.ssub.with.overflow.i8",
ty_int(TyI16) => "llvm.ssub.with.overflow.i16",
ty_int(TyI32) => "llvm.ssub.with.overflow.i32",
ty_int(TyI64) => "llvm.ssub.with.overflow.i64",
ty_uint(TyU8) => "llvm.usub.with.overflow.i8",
ty_uint(TyU16) => "llvm.usub.with.overflow.i16",
ty_uint(TyU32) => "llvm.usub.with.overflow.i32",
ty_uint(TyU64) => "llvm.usub.with.overflow.i64",
_ => unreachable!(),
},
OverflowOp::Mul => match new_sty {
ty_int(TyI8) => "llvm.smul.with.overflow.i8",
ty_int(TyI16) => "llvm.smul.with.overflow.i16",
ty_int(TyI32) => "llvm.smul.with.overflow.i32",
ty_int(TyI64) => "llvm.smul.with.overflow.i64",
ty_uint(TyU8) => "llvm.umul.with.overflow.i8",
ty_uint(TyU16) => "llvm.umul.with.overflow.i16",
ty_uint(TyU32) => "llvm.umul.with.overflow.i32",
ty_uint(TyU64) => "llvm.umul.with.overflow.i64",
_ => unreachable!(),
},
}
}
}
fn with_overflow_check<'a, 'b>(bcx: Block<'a, 'b>, oop: OverflowOp, info: NodeIdAndSpan,
lhs_t: Ty, lhs: ValueRef, rhs: ValueRef, binop_debug_loc: DebugLoc)
-> (Block<'a, 'b>, ValueRef) {
if bcx.unreachable.get() { return (bcx, _Undef(lhs)); }
if bcx.ccx().check_overflow() {
let name = oop.to_intrinsic_name(bcx.tcx(), lhs_t);
let llfn = bcx.ccx().get_intrinsic(&name);
let val = Call(bcx, llfn, &[lhs, rhs], None, binop_debug_loc);
let result = ExtractValue(bcx, val, 0); // iN operation result
let overflow = ExtractValue(bcx, val, 1); // i1 "did it overflow?"
let cond = ICmp(bcx, llvm::IntEQ, overflow, C_integral(Type::i1(bcx.ccx()), 1, false),
binop_debug_loc);
let expect = bcx.ccx().get_intrinsic(&"llvm.expect.i1");
Call(bcx, expect, &[cond, C_integral(Type::i1(bcx.ccx()), 0, false)],
None, binop_debug_loc);
let bcx =
base::with_cond(bcx, cond, |bcx|
controlflow::trans_fail(bcx, info,
InternedString::new("arithmetic operation overflowed")));
(bcx, result)
} else {
let res = match oop {
OverflowOp::Add => Add(bcx, lhs, rhs, binop_debug_loc),
OverflowOp::Sub => Sub(bcx, lhs, rhs, binop_debug_loc),
OverflowOp::Mul => Mul(bcx, lhs, rhs, binop_debug_loc),
};
(bcx, res)
}
}
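The `llvm.*.with.overflow.*` intrinsics used above return a pair of the wrapped result and an `i1` overflow flag, and `with_overflow_check` branches to a panic when that flag is set. The same semantics are visible from Rust through `overflowing_add` (value plus flag) and `checked_add` (flag folded into `Option`); a sketch of the check, not the actual codegen:

    fn checked_op(lhs: u8, rhs: u8) -> u8 {
        // Mirrors with_overflow_check: keep the result if the flag is clear, panic otherwise.
        let (res, oflo) = lhs.overflowing_add(rhs);
        if oflo {
            panic!("arithmetic operation overflowed");
        }
        res
    }

    fn main() {
        // What the intrinsic produces: (wrapped result, did-it-overflow flag).
        assert_eq!(200u8.overflowing_add(100), (44, true));
        assert_eq!(checked_op(40, 2), 42);
        // checked_op(200, 100) would panic with the same message trans_fail emits.
    }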

View File

@ -660,6 +660,11 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
llargs[0],
llargs[1],
call_debug_location),
(_, "overflowing_add") => Add(bcx, llargs[0], llargs[1], call_debug_location),
(_, "overflowing_sub") => Sub(bcx, llargs[0], llargs[1], call_debug_location),
(_, "overflowing_mul") => Mul(bcx, llargs[0], llargs[1], call_debug_location),
(_, "return_address") => {
if !fcx.caller_expects_out_pointer {
tcx.sess.span_err(call_info.span,

View File

@ -205,9 +205,9 @@ pub fn opt_ast_region_to_region<'tcx>(
if len == 2 && i == 0 {
m.push_str(" or ");
} else if i == len - 2 {
} else if i + 2 == len {
m.push_str(", or ");
} else if i != len - 1 {
} else if i + 1 != len {
m.push_str(", ");
}
}
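Rewriting `i == len - 2` as `i + 2 == len` (and `i != len - 1` as `i + 1 != len`) avoids unsigned underflow when the list holds fewer than two names; `len - 2` with `len == 1` would now panic. The hazard in isolation:

    fn main() {
        let len: usize = 1;
        let i: usize = 0;
        // `i == len - 2` would evaluate 1 - 2 and panic under debug overflow checks.
        assert!(i + 2 != len);                // safe: no subtraction on an unsigned type
        assert_eq!(len.checked_sub(2), None); // the subtraction really would underflow
    }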
@ -1233,17 +1233,18 @@ pub fn finish_resolving_def_to_ty<'tcx>(this: &AstConv<'tcx>,
if segments.is_empty() {
opt_self_ty.expect("missing T in <T>::a::b::c")
} else {
tcx.sess.span_bug(span,
&format!("found module name used as a type: {}",
tcx.map.node_to_string(id.node)));
span_err!(tcx.sess, span, E0247, "found module name used as a type: {}",
tcx.map.node_to_string(id.node));
return this.tcx().types.err;
}
}
def::DefPrimTy(prim_ty) => {
prim_ty_to_ty(tcx, segments, prim_ty)
}
_ => {
span_fatal!(tcx.sess, span, E0248,
"found value name used as a type: {:?}", *def);
span_err!(tcx.sess, span, E0248,
"found value name used as a type: {:?}", *def);
return this.tcx().types.err;
}
};
@ -1278,10 +1279,11 @@ pub fn ast_ty_to_ty<'tcx>(this: &AstConv<'tcx>,
match ast_ty_to_ty_cache.get(&ast_ty.id) {
Some(&ty::atttce_resolved(ty)) => return ty,
Some(&ty::atttce_unresolved) => {
span_fatal!(tcx.sess, ast_ty.span, E0246,
span_err!(tcx.sess, ast_ty.span, E0246,
"illegal recursive type; insert an enum \
or struct in the cycle, if this is \
desired");
return this.tcx().types.err;
}
None => { /* go on */ }
}
@ -1388,14 +1390,22 @@ pub fn ast_ty_to_ty<'tcx>(this: &AstConv<'tcx>,
ty::mk_vec(tcx, ast_ty_to_ty(this, rscope, &**ty),
Some(i as uint)),
_ => {
span_fatal!(tcx.sess, ast_ty.span, E0249,
"expected constant expr for array length");
span_err!(tcx.sess, ast_ty.span, E0249,
"expected constant expr for array length");
this.tcx().types.err
}
}
}
Err(r) => {
span_fatal!(tcx.sess, ast_ty.span, E0250,
"expected constant expr for array length: {}", r);
Err(ref r) => {
let subspan =
ast_ty.span.lo <= r.span.lo && r.span.hi <= ast_ty.span.hi;
span_err!(tcx.sess, r.span, E0250,
"array length constant evaluation error: {}",
r.description().as_slice());
if !subspan {
span_note!(tcx.sess, ast_ty.span, "for array length here")
}
this.tcx().types.err
}
}
}

View File

@ -1363,10 +1363,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
match self.inh.locals.borrow().get(&nid) {
Some(&t) => t,
None => {
self.tcx().sess.span_bug(
self.tcx().sess.span_err(
span,
&format!("no type for local variable {}",
nid));
&format!("no type for local variable {}", nid));
self.tcx().types.err
}
}
}
@ -4554,6 +4554,7 @@ pub fn check_enum_variants<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>,
id: ast::NodeId,
hint: attr::ReprAttr)
-> Vec<Rc<ty::VariantInfo<'tcx>>> {
use std::num::Int;
let rty = ty::node_id_to_type(ccx.tcx, id);
let mut variants: Vec<Rc<ty::VariantInfo>> = Vec::new();
@ -4565,7 +4566,13 @@ pub fn check_enum_variants<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>,
// If the discriminant value is specified explicitly in the enum check whether the
// initialization expression is valid, otherwise use the last value plus one.
let mut current_disr_val = match prev_disr_val {
Some(prev_disr_val) => prev_disr_val + 1,
Some(prev_disr_val) => {
if let Some(v) = prev_disr_val.checked_add(1) {
v
} else {
ty::INITIAL_DISCRIMINANT_VALUE
}
}
None => ty::INITIAL_DISCRIMINANT_VALUE
};
@ -4597,8 +4604,9 @@ pub fn check_enum_variants<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>,
"expected signed integer constant");
}
Err(ref err) => {
span_err!(ccx.tcx.sess, e.span, E0080,
"expected constant: {}", *err);
span_err!(ccx.tcx.sess, err.span, E0080,
"constant evaluation error: {}",
err.description().as_slice());
}
}
},
@ -5491,6 +5499,9 @@ pub fn check_intrinsic_type(ccx: &CrateCtxt, it: &ast::ForeignItem) {
(0, vec!(tcx.types.u64, tcx.types.u64),
ty::mk_tup(tcx, vec!(tcx.types.u64, tcx.types.bool))),
"overflowing_add" | "overflowing_sub" | "overflowing_mul" =>
(1, vec![param(ccx, 0), param(ccx, 0)], param(ccx, 0)),
"return_address" => (0, vec![], ty::mk_imm_ptr(tcx, tcx.types.u8)),
"assume" => (0, vec![tcx.types.bool], ty::mk_nil(tcx)),

View File

@ -1569,8 +1569,8 @@ impl<T: Iterator<Item=char>> Parser<T> {
while !self.eof() {
match self.ch_or_null() {
c @ '0' ... '9' => {
accum *= 10;
accum += (c as u64) - ('0' as u64);
accum = accum.wrapping_mul(10);
accum = accum.wrapping_add((c as u64) - ('0' as u64));
// Detect overflow by comparing to the last value.
if accum <= last_accum { return self.error(InvalidNumber); }
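The parser keeps the arithmetic wrapping and flags overflow by comparing against the previous accumulator value. An equivalent standalone digit loop can make the failure explicit with `checked_mul`/`checked_add` instead, as in this sketch (the function name is illustrative, not part of the library):

    fn parse_u64(s: &str) -> Option<u64> {
        let mut accum: u64 = 0;
        for c in s.chars() {
            let digit = c.to_digit(10)? as u64;
            // checked_* return None instead of wrapping, so overflow is explicit.
            accum = accum.checked_mul(10)?.checked_add(digit)?;
        }
        Some(accum)
    }

    fn main() {
        assert_eq!(parse_u64("18446744073709551615"), Some(u64::MAX));
        assert_eq!(parse_u64("18446744073709551616"), None); // one past u64::MAX
        assert_eq!(parse_u64("12a"), None);                  // non-digit rejected
    }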

View File

@ -314,6 +314,13 @@ fn search_hashed<K, V, M, F>(table: M,
M: Deref<Target=RawTable<K, V>>,
F: FnMut(&K) -> bool,
{
// This is the only function where capacity can be zero. To avoid
// undefined behaviour when Bucket::new gets the raw bucket in this
// case, immediately return the appropriate search result.
if table.capacity() == 0 {
return TableRef(table);
}
let size = table.size();
let mut probe = Bucket::new(table, hash);
let ib = probe.index();

View File

@ -20,6 +20,7 @@ use marker::{Copy, Send, Sync, Sized, self};
use mem::{min_align_of, size_of};
use mem;
use num::{Int, UnsignedInt};
use num::wrapping::{OverflowingOps, WrappingOps};
use ops::{Deref, DerefMut, Drop};
use option::Option;
use option::Option::{Some, None};
@ -224,6 +225,9 @@ impl<K, V, M: Deref<Target=RawTable<K, V>>> Bucket<K, V, M> {
}
pub fn at_index(table: M, ib_index: usize) -> Bucket<K, V, M> {
// if capacity is 0, then the RawBucket will be populated with bogus pointers.
// This is an uncommon case though, so avoid it in release builds.
debug_assert!(table.capacity() > 0, "Table should have capacity at this point");
let ib_index = ib_index & (table.capacity() - 1);
Bucket {
raw: unsafe {
@ -371,7 +375,7 @@ impl<K, V, M: Deref<Target=RawTable<K, V>>> FullBucket<K, V, M> {
// Calculates the distance one has to travel when going from
// `hash mod capacity` onwards to `idx mod capacity`, wrapping around
// if the destination is not reached before the end of the table.
(self.idx - self.hash().inspect() as usize) & (self.table.capacity() - 1)
(self.idx.wrapping_sub(self.hash().inspect() as usize)) & (self.table.capacity() - 1)
}
#[inline]
@ -524,13 +528,13 @@ fn test_rounding() {
fn calculate_offsets(hashes_size: usize,
keys_size: usize, keys_align: usize,
vals_align: usize)
-> (usize, usize) {
-> (usize, usize, bool) {
let keys_offset = round_up_to_next(hashes_size, keys_align);
let end_of_keys = keys_offset + keys_size;
let (end_of_keys, oflo) = keys_offset.overflowing_add(keys_size);
let vals_offset = round_up_to_next(end_of_keys, vals_align);
(keys_offset, vals_offset)
(keys_offset, vals_offset, oflo)
}
// Returns a tuple of (minimum required malloc alignment, hash_offset,
@ -538,26 +542,26 @@ fn calculate_offsets(hashes_size: usize,
fn calculate_allocation(hash_size: usize, hash_align: usize,
keys_size: usize, keys_align: usize,
vals_size: usize, vals_align: usize)
-> (usize, usize, usize) {
-> (usize, usize, usize, bool) {
let hash_offset = 0;
let (_, vals_offset) = calculate_offsets(hash_size,
keys_size, keys_align,
vals_align);
let end_of_vals = vals_offset + vals_size;
let (_, vals_offset, oflo) = calculate_offsets(hash_size,
keys_size, keys_align,
vals_align);
let (end_of_vals, oflo2) = vals_offset.overflowing_add(vals_size);
let min_align = cmp::max(hash_align, cmp::max(keys_align, vals_align));
(min_align, hash_offset, end_of_vals)
(min_align, hash_offset, end_of_vals, oflo || oflo2)
}
#[test]
fn test_offset_calculation() {
assert_eq!(calculate_allocation(128, 8, 15, 1, 4, 4), (8, 0, 148));
assert_eq!(calculate_allocation(3, 1, 2, 1, 1, 1), (1, 0, 6));
assert_eq!(calculate_allocation(6, 2, 12, 4, 24, 8), (8, 0, 48));
assert_eq!(calculate_offsets(128, 15, 1, 4), (128, 144));
assert_eq!(calculate_offsets(3, 2, 1, 1), (3, 5));
assert_eq!(calculate_offsets(6, 12, 4, 8), (8, 24));
assert_eq!(calculate_allocation(128, 8, 15, 1, 4, 4), (8, 0, 148, false));
assert_eq!(calculate_allocation(3, 1, 2, 1, 1, 1), (1, 0, 6, false));
assert_eq!(calculate_allocation(6, 2, 12, 4, 24, 8), (8, 0, 48, false));
assert_eq!(calculate_offsets(128, 15, 1, 4), (128, 144, false));
assert_eq!(calculate_offsets(3, 2, 1, 1), (3, 5, false));
assert_eq!(calculate_offsets(6, 12, 4, 8), (8, 24, false));
}
impl<K, V> RawTable<K, V> {
@ -587,12 +591,14 @@ impl<K, V> RawTable<K, V> {
// This is great in theory, but in practice getting the alignment
// right is a little subtle. Therefore, calculating offsets has been
// factored out into a different function.
let (malloc_alignment, hash_offset, size) =
let (malloc_alignment, hash_offset, size, oflo) =
calculate_allocation(
hashes_size, min_align_of::<u64>(),
keys_size, min_align_of::< K >(),
vals_size, min_align_of::< V >());
assert!(!oflo, "capacity overflow");
// One check for overflow that covers calculation and rounding of size.
let size_of_bucket = size_of::<u64>().checked_add(size_of::<K>()).unwrap()
.checked_add(size_of::<V>()).unwrap();
@ -618,10 +624,11 @@ impl<K, V> RawTable<K, V> {
let keys_size = self.capacity * size_of::<K>();
let buffer = *self.hashes as *mut u8;
let (keys_offset, vals_offset) = calculate_offsets(hashes_size,
keys_size, min_align_of::<K>(),
min_align_of::<V>());
let (keys_offset, vals_offset, oflo) =
calculate_offsets(hashes_size,
keys_size, min_align_of::<K>(),
min_align_of::<V>());
debug_assert!(!oflo, "capacity overflow");
unsafe {
RawBucket {
hash: *self.hashes,
@ -995,9 +1002,12 @@ impl<K, V> Drop for RawTable<K, V> {
let hashes_size = self.capacity * size_of::<u64>();
let keys_size = self.capacity * size_of::<K>();
let vals_size = self.capacity * size_of::<V>();
let (align, _, size) = calculate_allocation(hashes_size, min_align_of::<u64>(),
keys_size, min_align_of::<K>(),
vals_size, min_align_of::<V>());
let (align, _, size, oflo) =
calculate_allocation(hashes_size, min_align_of::<u64>(),
keys_size, min_align_of::<K>(),
vals_size, min_align_of::<V>());
debug_assert!(!oflo, "should be impossible");
unsafe {
deallocate(*self.hashes as *mut u8, size, align);

View File

@ -30,6 +30,7 @@ pub use core::num::{from_uint, from_u8, from_u16, from_u32, from_u64};
pub use core::num::{from_f32, from_f64};
pub use core::num::{FromStrRadix, from_str_radix};
pub use core::num::{FpCategory, ParseIntError, ParseFloatError};
pub use core::num::wrapping;
use option::Option;
@ -1757,25 +1758,25 @@ mod tests {
let mut u8_val: u8 = 255_u8;
assert_eq!(u8_val.to_string(), "255");
u8_val += 1 as u8;
u8_val = u8_val.wrapping_add(1);
assert_eq!(u8_val.to_string(), "0");
let mut u16_val: u16 = 65_535_u16;
assert_eq!(u16_val.to_string(), "65535");
u16_val += 1 as u16;
u16_val = u16_val.wrapping_add(1);
assert_eq!(u16_val.to_string(), "0");
let mut u32_val: u32 = 4_294_967_295_u32;
assert_eq!(u32_val.to_string(), "4294967295");
u32_val += 1 as u32;
u32_val = u32_val.wrapping_add(1);
assert_eq!(u32_val.to_string(), "0");
let mut u64_val: u64 = 18_446_744_073_709_551_615_u64;
assert_eq!(u64_val.to_string(), "18446744073709551615");
u64_val += 1 as u64;
u64_val = u64_val.wrapping_add(1);
assert_eq!(u64_val.to_string(), "0");
}
@ -1789,7 +1790,7 @@ mod tests {
assert_eq!(from_str::<u8>("255"), Some(u8_val));
assert_eq!(from_str::<u8>("256"), None);
u8_val += 1 as u8;
u8_val = u8_val.wrapping_add(1);
assert_eq!(from_str::<u8>("0"), Some(u8_val));
assert_eq!(from_str::<u8>("-1"), None);
@ -1797,7 +1798,7 @@ mod tests {
assert_eq!(from_str::<u16>("65535"), Some(u16_val));
assert_eq!(from_str::<u16>("65536"), None);
u16_val += 1 as u16;
u16_val = u16_val.wrapping_add(1);
assert_eq!(from_str::<u16>("0"), Some(u16_val));
assert_eq!(from_str::<u16>("-1"), None);
@ -1805,7 +1806,7 @@ mod tests {
assert_eq!(from_str::<u32>("4294967295"), Some(u32_val));
assert_eq!(from_str::<u32>("4294967296"), None);
u32_val += 1 as u32;
u32_val = u32_val.wrapping_add(1);
assert_eq!(from_str::<u32>("0"), Some(u32_val));
assert_eq!(from_str::<u32>("-1"), None);
@ -1813,7 +1814,7 @@ mod tests {
assert_eq!(from_str::<u64>("18446744073709551615"), Some(u64_val));
assert_eq!(from_str::<u64>("18446744073709551616"), None);
u64_val += 1 as u64;
u64_val = u64_val.wrapping_add(1);
assert_eq!(from_str::<u64>("0"), Some(u64_val));
assert_eq!(from_str::<u64>("-1"), None);
}

View File

@ -427,6 +427,7 @@ const DIGIT_E_RADIX: u32 = ('e' as u32) - ('a' as u32) + 11;
#[cfg(test)]
mod tests {
use core::num::wrapping::WrappingOps;
use string::ToString;
#[test]
@ -434,25 +435,25 @@ mod tests {
let mut i8_val: i8 = 127_i8;
assert_eq!(i8_val.to_string(), "127");
i8_val += 1 as i8;
i8_val = i8_val.wrapping_add(1);
assert_eq!(i8_val.to_string(), "-128");
let mut i16_val: i16 = 32_767_i16;
assert_eq!(i16_val.to_string(), "32767");
i16_val += 1 as i16;
i16_val = i16_val.wrapping_add(1);
assert_eq!(i16_val.to_string(), "-32768");
let mut i32_val: i32 = 2_147_483_647_i32;
assert_eq!(i32_val.to_string(), "2147483647");
i32_val += 1 as i32;
i32_val = i32_val.wrapping_add(1);
assert_eq!(i32_val.to_string(), "-2147483648");
let mut i64_val: i64 = 9_223_372_036_854_775_807_i64;
assert_eq!(i64_val.to_string(), "9223372036854775807");
i64_val += 1 as i64;
i64_val = i64_val.wrapping_add(1);
assert_eq!(i64_val.to_string(), "-9223372036854775808");
}
}

View File

@ -58,3 +58,5 @@
#[doc(no_inline)] pub use old_io::{Buffer, Writer, Reader, Seek, BufferPrelude};
// NB: remove when range syntax lands
#[doc(no_inline)] pub use iter::range;
#[doc(no_inline)] pub use num::wrapping::{Wrapping, WrappingOps};

View File

@ -386,8 +386,8 @@ impl Rng for ThreadRng {
/// ```
/// use std::rand;
///
/// let x = rand::random();
/// println!("{}", 2u8 * x);
/// let x: u8 = rand::random();
/// println!("{}", 2 * x as u16);
///
/// let y = rand::random::<f64>();
/// println!("{}", y);
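The doctest is adjusted because `2u8 * x` exceeds 255 for roughly half of all random `u8` values and would now panic in debug builds; widening to `u16` before multiplying keeps the product in range. The two behaviours for the worst case:

    fn main() {
        let x: u8 = 255; // worst case a random u8 can take
        assert_eq!(2 * x as u16, 510);        // widened first: always fits in u16
        assert_eq!(2u8.wrapping_mul(x), 254); // what unchecked u8 arithmetic would give
        // `2u8 * x` itself panics with "arithmetic operation overflowed" when checks are on.
    }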

View File

@ -27,6 +27,7 @@ use marker::Send;
use ops::FnOnce;
use sys;
use thunk::Thunk;
use usize;
// Reexport some of our utilities which are expected by other crates.
pub use self::util::{default_sched_threads, min_stack, running_on_valgrind};
@ -78,7 +79,20 @@ fn lang_start(main: *const u8, argc: int, argv: *const *const u8) -> int {
// FIXME #11359 we just assume that this thread has a stack of a
// certain size, and estimate that there's at most 20KB of stack
// frames above our current position.
let my_stack_bottom = my_stack_top + 20000 - OS_DEFAULT_STACK_ESTIMATE;
const TWENTY_KB: uint = 20000;
// saturating-add to sidestep overflow
let top_plus_spill = if usize::MAX - TWENTY_KB < my_stack_top {
usize::MAX
} else {
my_stack_top + TWENTY_KB
};
// saturating-sub to sidestep underflow
let my_stack_bottom = if top_plus_spill < OS_DEFAULT_STACK_ESTIMATE {
0
} else {
top_plus_spill - OS_DEFAULT_STACK_ESTIMATE
};
let failed = unsafe {
// First, make sure we don't trigger any __morestack overflow checks,
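The hand-written comparisons above implement a saturating add followed by a saturating sub so the stack-bottom estimate can never wrap. The stable integer methods express the same thing directly, as in this sketch (constants reused from the surrounding code purely for illustration):

    fn estimate_stack_bottom(my_stack_top: usize, os_default_stack_estimate: usize) -> usize {
        const TWENTY_KB: usize = 20_000;
        // Clamp at usize::MAX on the way up and at 0 on the way back down.
        my_stack_top
            .saturating_add(TWENTY_KB)
            .saturating_sub(os_default_stack_estimate)
    }

    fn main() {
        assert_eq!(estimate_stack_bottom(100_000, 8_192), 111_808);
        assert_eq!(estimate_stack_bottom(usize::MAX, 8_192), usize::MAX - 8_192);
        assert_eq!(estimate_stack_bottom(0, 1 << 20), 0); // would underflow otherwise
    }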

View File

@ -176,7 +176,7 @@ pub fn write(w: &mut Writer) -> IoResult<()> {
let mut ip = unsafe {
uw::_Unwind_GetIPInfo(ctx, &mut ip_before_insn) as *mut libc::c_void
};
if ip_before_insn == 0 {
if !ip.is_null() && ip_before_insn == 0 {
// this is a non-signaling frame, so `ip` refers to the address
// after the calling instruction. account for that.
ip = (ip as usize - 1) as *mut _;

View File

@ -181,7 +181,6 @@ fn encodable_substructure(cx: &mut ExtCtxt, trait_span: Span,
Struct(ref fields) => {
let emit_struct_field = cx.ident_of("emit_struct_field");
let mut stmts = Vec::new();
let last = fields.len() - 1;
for (i, &FieldInfo {
name,
ref self_,
@ -204,6 +203,7 @@ fn encodable_substructure(cx: &mut ExtCtxt, trait_span: Span,
lambda));
// last call doesn't need a try!
let last = fields.len() - 1;
let call = if i != last {
cx.expr_try(span, call)
} else {
@ -240,25 +240,24 @@ fn encodable_substructure(cx: &mut ExtCtxt, trait_span: Span,
let encoder = cx.expr_ident(trait_span, blkarg);
let emit_variant_arg = cx.ident_of("emit_enum_variant_arg");
let mut stmts = Vec::new();
let last = fields.len() - 1;
for (i, &FieldInfo { ref self_, span, .. }) in fields.iter().enumerate() {
let enc = cx.expr_method_call(span, self_.clone(),
encode, vec!(blkencoder.clone()));
let lambda = cx.lambda_expr_1(span, enc, blkarg);
let call = cx.expr_method_call(span, blkencoder.clone(),
emit_variant_arg,
vec!(cx.expr_usize(span, i),
lambda));
let call = if i != last {
cx.expr_try(span, call)
} else {
cx.expr(span, ExprRet(Some(call)))
};
stmts.push(cx.stmt_expr(call));
}
// enums with no fields need to return Ok()
if stmts.len() == 0 {
if fields.len() > 0 {
let last = fields.len() - 1;
for (i, &FieldInfo { ref self_, span, .. }) in fields.iter().enumerate() {
let enc = cx.expr_method_call(span, self_.clone(),
encode, vec!(blkencoder.clone()));
let lambda = cx.lambda_expr_1(span, enc, blkarg);
let call = cx.expr_method_call(span, blkencoder.clone(),
emit_variant_arg,
vec!(cx.expr_usize(span, i),
lambda));
let call = if i != last {
cx.expr_try(span, call)
} else {
cx.expr(span, ExprRet(Some(call)))
};
stmts.push(cx.stmt_expr(call));
}
} else {
let ret_ok = cx.expr(trait_span,
ExprRet(Some(cx.expr_ok(trait_span,
cx.expr_tuple(trait_span, vec![])))));
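Computing `fields.len() - 1` before checking for emptiness underflows on a fieldless variant once subtraction is checked, so the rewrite guards on `fields.len() > 0` and only then takes `len() - 1`. The hazard on its own:

    fn main() {
        let fields: Vec<u32> = Vec::new();
        // `fields.len() - 1` would be 0usize - 1 and panic under debug overflow checks.
        assert_eq!(fields.len().checked_sub(1), None);
        if !fields.is_empty() {
            let last = fields.len() - 1; // only reachable when the subtraction is safe
            let _ = last;
        }
    }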

View File

@ -0,0 +1,23 @@
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Check that a constant-evaluation underflow highlights the correct
// spot (where the underflow occurred), while also providing the
// overall context for what caused the evaluation.
const ONE: usize = 1;
const TWO: usize = 2;
const LEN: usize = ONE - TWO;
//~^ ERROR array length constant evaluation error: attempted to sub with overflow [E0250]
fn main() {
let a: [i8; LEN] = unimplemented!();
//~^ NOTE for array length here
}

View File

@ -0,0 +1,20 @@
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Check that a constant-evaluation underflow highlights the correct
// spot (where the underflow occurred).
const ONE: usize = 1;
const TWO: usize = 2;
fn main() {
let a: [i8; ONE - TWO] = unimplemented!();
//~^ ERROR array length constant evaluation error: attempted to sub with overflow [E0250]
}

View File

@ -9,8 +9,8 @@
// except according to those terms.
enum test {
div_zero = 1/0, //~ERROR expected constant: attempted to divide by zero
rem_zero = 1%0 //~ERROR expected constant: attempted remainder with a divisor of zero
div_zero = 1/0, //~ERROR constant evaluation error: attempted to divide by zero
rem_zero = 1%0 //~ERROR constant evaluation error: attempted remainder with a divisor of zero
}
fn main() {}

View File

@ -23,6 +23,7 @@ pub trait Public: Private<
<Self as Public>::P,
//~^ ERROR illegal recursive type; insert an enum or struct in the cycle, if this is desired
<Self as Public>::R
//~^ ERROR unsupported cyclic reference between types/traits detected
> {
type P;
type R;

View File

@ -12,5 +12,7 @@ const TUP: (usize,) = (42,);
fn main() {
let a: [isize; TUP.1];
//~^ ERROR expected constant expr for array length: tuple index out of bounds
//~^ ERROR array length constant evaluation error: tuple index out of bounds
//~| ERROR attempted out-of-bounds tuple index
//~| ERROR attempted out-of-bounds tuple index
}

View File

@ -13,5 +13,7 @@ const STRUCT: MyStruct = MyStruct { field: 42 };
fn main() {
let a: [isize; STRUCT.nonexistent_field];
//~^ ERROR expected constant expr for array length: nonexistent struct field
//~^ ERROR array length constant evaluation error: nonexistent struct field
//~| ERROR attempted access of field `nonexistent_field`
//~| ERROR attempted access of field `nonexistent_field`
}

View File

@ -13,6 +13,7 @@
fn main() {
fn bar(n: isize) {
let _x: [isize; n];
//~^ ERROR expected constant expr for array length: non-constant path in constant expr
//~^ ERROR no type for local variable
//~| ERROR array length constant evaluation error: non-constant path in constant expr
}
}

View File

@ -0,0 +1,18 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern:thread '<main>' panicked at 'arithmetic operation overflowed'
// (Work around constant-evaluation)
fn value() -> u8 { 200 }
fn main() {
let _x = value() + value() + value();
}

View File

@ -0,0 +1,18 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern:thread '<main>' panicked at 'arithmetic operation overflowed'
// (Work around constant-evaluation)
fn value() -> u8 { 200 }
fn main() {
let x = value() * 4;
}

View File

@ -0,0 +1,18 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern:thread '<main>' panicked at 'arithmetic operation overflowed'
// (Work around constant-evaluation)
fn value() -> u8 { 42 }
fn main() {
let _x = value() - (value() + 1);
}

View File

@ -68,7 +68,7 @@ fn dump_filelines(filelines: &[Pos]) {
}
#[inline(never)]
fn inner(counter: &mut u32, main_pos: Pos, outer_pos: Pos) {
fn inner(counter: &mut i32, main_pos: Pos, outer_pos: Pos) {
check!(counter; main_pos, outer_pos);
check!(counter; main_pos, outer_pos);
let inner_pos = pos!(); aux::callback(|aux_pos| {
@ -80,12 +80,12 @@ fn inner(counter: &mut u32, main_pos: Pos, outer_pos: Pos) {
}
#[inline(always)]
fn inner_inlined(counter: &mut u32, main_pos: Pos, outer_pos: Pos) {
fn inner_inlined(counter: &mut i32, main_pos: Pos, outer_pos: Pos) {
check!(counter; main_pos, outer_pos);
check!(counter; main_pos, outer_pos);
#[inline(always)]
fn inner_further_inlined(counter: &mut u32, main_pos: Pos, outer_pos: Pos, inner_pos: Pos) {
fn inner_further_inlined(counter: &mut i32, main_pos: Pos, outer_pos: Pos, inner_pos: Pos) {
check!(counter; main_pos, outer_pos, inner_pos);
}
inner_further_inlined(counter, main_pos, outer_pos, pos!());
@ -103,7 +103,7 @@ fn inner_inlined(counter: &mut u32, main_pos: Pos, outer_pos: Pos) {
}
#[inline(never)]
fn outer(mut counter: u32, main_pos: Pos) {
fn outer(mut counter: i32, main_pos: Pos) {
inner(&mut counter, main_pos, pos!());
inner_inlined(&mut counter, main_pos, pos!());
}

View File

@ -10,7 +10,7 @@
use std::num::Int;
extern "C" fn foo<T: Int>(a: T, b: T) -> T { a + b }
extern "C" fn foo<T: WrappingOps>(a: T, b: T) -> T { a.wrapping_add(b) }
fn main() {
assert_eq!(99u8, foo(255u8, 100u8));