Mass rename uint/int to usize/isize

Now that support for the old `int` and `uint` type names has been removed, all lingering uses are renamed to `isize` and `usize`, as illustrated below.
Alex Crichton 2015-03-25 17:06:52 -07:00
parent 54f16b818b
commit 43bfaa4a33
1391 changed files with 5180 additions and 5238 deletions
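For readers skimming the diffs below, here is a minimal sketch (illustrative only, not taken from the commit) of what the rename means for ordinary code: the pointer-sized integer types formerly spelled `int` and `uint` are now written `isize` and `usize`, while the fixed-width types (`i32`, `u64`, and so on) are unchanged.

```
// Illustrative sketch, not part of this commit.
//
// Before this change (no longer compiles once `int`/`uint` are removed):
//     fn count_evens(xs: &[int]) -> uint {
//         xs.iter().filter(|&&x| x % 2 == 0).count()
//     }
//
// After the rename, the pointer-sized integer types are spelled `isize`
// and `usize`:
fn count_evens(xs: &[isize]) -> usize {
    // `count()` already returns `usize`, matching the declared return type.
    xs.iter().filter(|&&x| x % 2 == 0).count()
}

fn main() {
    let xs: [isize; 5] = [1, 2, 3, 4, 5];
    assert_eq!(count_evens(&xs), 2);
    println!("evens: {}", count_evens(&xs));
}
```

Note that indexing, lengths, and capacities take `usize`, which is why most of the mechanical replacements below are `uint` to `usize`; `isize` appears mainly for signed offsets and pointer arithmetic.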

View File

@ -12,7 +12,6 @@
#![feature(box_syntax)]
#![feature(collections)]
#![feature(int_uint)]
#![feature(old_io)]
#![feature(old_path)]
#![feature(rustc_private)]

View File

@ -15,13 +15,13 @@ use std::io::prelude::*;
use std::path::Path;
pub struct ExpectedError {
pub line: uint,
pub line: usize,
pub kind: String,
pub msg: String,
}
#[derive(PartialEq, Debug)]
enum WhichLine { ThisLine, FollowPrevious(uint), AdjustBackward(uint) }
enum WhichLine { ThisLine, FollowPrevious(usize), AdjustBackward(usize) }
/// Looks for either "//~| KIND MESSAGE" or "//~^^... KIND MESSAGE"
/// The former is a "follow" that inherits its target from the preceding line;
@ -58,8 +58,8 @@ pub fn load_errors(testfile: &Path) -> Vec<ExpectedError> {
}).collect()
}
fn parse_expected(last_nonfollow_error: Option<uint>,
line_num: uint,
fn parse_expected(last_nonfollow_error: Option<usize>,
line_num: usize,
line: &str) -> Option<(WhichLine, ExpectedError)> {
let start = match line.find("//~") { Some(i) => i, None => return None };
let (follow, adjusts) = if line.char_at(start + 3) == '|' {

View File

@ -357,7 +357,7 @@ pub fn parse_name_value_directive(line: &str, directive: &str)
}
}
pub fn gdb_version_to_int(version_string: &str) -> int {
pub fn gdb_version_to_int(version_string: &str) -> isize {
let error_string = format!(
"Encountered GDB version string with unexpected format: {}",
version_string);
@ -369,17 +369,17 @@ pub fn gdb_version_to_int(version_string: &str) -> int {
panic!("{}", error_string);
}
let major: int = components[0].parse().ok().expect(&error_string);
let minor: int = components[1].parse().ok().expect(&error_string);
let major: isize = components[0].parse().ok().expect(&error_string);
let minor: isize = components[1].parse().ok().expect(&error_string);
return major * 1000 + minor;
}
pub fn lldb_version_to_int(version_string: &str) -> int {
pub fn lldb_version_to_int(version_string: &str) -> isize {
let error_string = format!(
"Encountered LLDB version string with unexpected format: {}",
version_string);
let error_string = error_string;
let major: int = version_string.parse().ok().expect(&error_string);
let major: isize = version_string.parse().ok().expect(&error_string);
return major;
}

View File

@ -758,7 +758,7 @@ fn run_debuginfo_lldb_test(config: &Config, props: &TestProps, testfile: &Path)
struct DebuggerCommands {
commands: Vec<String>,
check_lines: Vec<String>,
breakpoint_lines: Vec<uint>,
breakpoint_lines: Vec<usize>,
}
fn parse_debugger_commands(file_path: &Path, debugger_prefix: &str)
@ -1036,7 +1036,7 @@ fn is_compiler_error_or_warning(line: &str) -> bool {
scan_string(line, "warning", &mut i));
}
fn scan_until_char(haystack: &str, needle: char, idx: &mut uint) -> bool {
fn scan_until_char(haystack: &str, needle: char, idx: &mut usize) -> bool {
if *idx >= haystack.len() {
return false;
}
@ -1048,7 +1048,7 @@ fn scan_until_char(haystack: &str, needle: char, idx: &mut uint) -> bool {
return true;
}
fn scan_char(haystack: &str, needle: char, idx: &mut uint) -> bool {
fn scan_char(haystack: &str, needle: char, idx: &mut usize) -> bool {
if *idx >= haystack.len() {
return false;
}
@ -1060,7 +1060,7 @@ fn scan_char(haystack: &str, needle: char, idx: &mut uint) -> bool {
return true;
}
fn scan_integer(haystack: &str, idx: &mut uint) -> bool {
fn scan_integer(haystack: &str, idx: &mut usize) -> bool {
let mut i = *idx;
while i < haystack.len() {
let ch = haystack.char_at(i);
@ -1076,7 +1076,7 @@ fn scan_integer(haystack: &str, idx: &mut uint) -> bool {
return true;
}
fn scan_string(haystack: &str, needle: &str, idx: &mut uint) -> bool {
fn scan_string(haystack: &str, needle: &str, idx: &mut usize) -> bool {
let mut haystack_i = *idx;
let mut needle_i = 0;
while needle_i < needle.len() {
@ -1725,7 +1725,7 @@ fn disassemble_extract(config: &Config, _props: &TestProps,
}
fn count_extracted_lines(p: &Path) -> uint {
fn count_extracted_lines(p: &Path) -> usize {
let mut x = Vec::new();
File::open(&p.with_extension("ll")).unwrap().read_to_end(&mut x).unwrap();
let x = str::from_utf8(&x).unwrap();

View File

@ -2440,9 +2440,6 @@ The currently implemented features of the reference compiler are:
* `intrinsics` - Allows use of the "rust-intrinsics" ABI. Compiler intrinsics
are inherently unstable and no promise about them is made.
* `int_uint` - Allows the use of the `int` and `uint` types, which are deprecated.
Use `isize` and `usize` instead.
* `lang_items` - Allows use of the `#[lang]` attribute. Like `intrinsics`,
lang items are inherently unstable and no promise about them
is made.
@ -2759,7 +2756,7 @@ The following are examples of structure expressions:
```
# struct Point { x: f64, y: f64 }
# struct TuplePoint(f64, f64);
# mod game { pub struct User<'a> { pub name: &'a str, pub age: u32, pub score: uint } }
# mod game { pub struct User<'a> { pub name: &'a str, pub age: u32, pub score: usize } }
# struct Cookie; fn some_fn<T>(t: T) {}
Point {x: 10.0, y: 20.0};
TuplePoint(10.0, 20.0);
@ -3402,7 +3399,7 @@ subpattern`. For example:
#![feature(box_patterns)]
#![feature(box_syntax)]
enum List { Nil, Cons(uint, Box<List>) }
enum List { Nil, Cons(u32, Box<List>) }
fn is_sorted(list: &List) -> bool {
match *list {

View File

@ -401,7 +401,7 @@ Unsafe functions, on the other hand, advertise it to the world. An unsafe functi
this:
```
unsafe fn kaboom(ptr: *const int) -> int { *ptr }
unsafe fn kaboom(ptr: *const i32) -> i32 { *ptr }
```
This function can only be called from an `unsafe` block or another `unsafe` function.
@ -423,7 +423,7 @@ extern {
fn main() {
println!("You have readline version {} installed.",
rl_readline_version as int);
rl_readline_version as i32);
}
```

View File

@ -129,7 +129,7 @@ need, and it can make your lifetimes more complex.
To write a function that's generic over types of strings, use `&str`.
```
fn some_string_length(x: &str) -> uint {
fn some_string_length(x: &str) -> usize {
x.len()
}

View File

@ -1064,7 +1064,7 @@ pub fn fence(order: Ordering) {
reason = "renamed to AtomicIsize")]
#[allow(missing_docs)]
pub struct AtomicInt {
v: UnsafeCell<int>,
v: UnsafeCell<isize>,
}
#[allow(deprecated)]
@ -1075,7 +1075,7 @@ unsafe impl Sync for AtomicInt {}
reason = "renamed to AtomicUsize")]
#[allow(missing_docs)]
pub struct AtomicUint {
v: UnsafeCell<uint>,
v: UnsafeCell<usize>,
}
#[allow(deprecated)]
@ -1097,52 +1097,52 @@ pub const ATOMIC_UINT_INIT: AtomicUint =
#[allow(missing_docs, deprecated)]
impl AtomicInt {
#[inline]
pub fn new(v: int) -> AtomicInt {
pub fn new(v: isize) -> AtomicInt {
AtomicInt {v: UnsafeCell::new(v)}
}
#[inline]
pub fn load(&self, order: Ordering) -> int {
pub fn load(&self, order: Ordering) -> isize {
unsafe { atomic_load(self.v.get(), order) }
}
#[inline]
pub fn store(&self, val: int, order: Ordering) {
pub fn store(&self, val: isize, order: Ordering) {
unsafe { atomic_store(self.v.get(), val, order); }
}
#[inline]
pub fn swap(&self, val: int, order: Ordering) -> int {
pub fn swap(&self, val: isize, order: Ordering) -> isize {
unsafe { atomic_swap(self.v.get(), val, order) }
}
#[inline]
pub fn compare_and_swap(&self, old: int, new: int, order: Ordering) -> int {
pub fn compare_and_swap(&self, old: isize, new: isize, order: Ordering) -> isize {
unsafe { atomic_compare_and_swap(self.v.get(), old, new, order) }
}
#[inline]
pub fn fetch_add(&self, val: int, order: Ordering) -> int {
pub fn fetch_add(&self, val: isize, order: Ordering) -> isize {
unsafe { atomic_add(self.v.get(), val, order) }
}
#[inline]
pub fn fetch_sub(&self, val: int, order: Ordering) -> int {
pub fn fetch_sub(&self, val: isize, order: Ordering) -> isize {
unsafe { atomic_sub(self.v.get(), val, order) }
}
#[inline]
pub fn fetch_and(&self, val: int, order: Ordering) -> int {
pub fn fetch_and(&self, val: isize, order: Ordering) -> isize {
unsafe { atomic_and(self.v.get(), val, order) }
}
#[inline]
pub fn fetch_or(&self, val: int, order: Ordering) -> int {
pub fn fetch_or(&self, val: isize, order: Ordering) -> isize {
unsafe { atomic_or(self.v.get(), val, order) }
}
#[inline]
pub fn fetch_xor(&self, val: int, order: Ordering) -> int {
pub fn fetch_xor(&self, val: isize, order: Ordering) -> isize {
unsafe { atomic_xor(self.v.get(), val, order) }
}
}
@ -1150,52 +1150,52 @@ impl AtomicInt {
#[allow(missing_docs, deprecated)]
impl AtomicUint {
#[inline]
pub fn new(v: uint) -> AtomicUint {
pub fn new(v: usize) -> AtomicUint {
AtomicUint { v: UnsafeCell::new(v) }
}
#[inline]
pub fn load(&self, order: Ordering) -> uint {
pub fn load(&self, order: Ordering) -> usize {
unsafe { atomic_load(self.v.get(), order) }
}
#[inline]
pub fn store(&self, val: uint, order: Ordering) {
pub fn store(&self, val: usize, order: Ordering) {
unsafe { atomic_store(self.v.get(), val, order); }
}
#[inline]
pub fn swap(&self, val: uint, order: Ordering) -> uint {
pub fn swap(&self, val: usize, order: Ordering) -> usize {
unsafe { atomic_swap(self.v.get(), val, order) }
}
#[inline]
pub fn compare_and_swap(&self, old: uint, new: uint, order: Ordering) -> uint {
pub fn compare_and_swap(&self, old: usize, new: usize, order: Ordering) -> usize {
unsafe { atomic_compare_and_swap(self.v.get(), old, new, order) }
}
#[inline]
pub fn fetch_add(&self, val: uint, order: Ordering) -> uint {
pub fn fetch_add(&self, val: usize, order: Ordering) -> usize {
unsafe { atomic_add(self.v.get(), val, order) }
}
#[inline]
pub fn fetch_sub(&self, val: uint, order: Ordering) -> uint {
pub fn fetch_sub(&self, val: usize, order: Ordering) -> usize {
unsafe { atomic_sub(self.v.get(), val, order) }
}
#[inline]
pub fn fetch_and(&self, val: uint, order: Ordering) -> uint {
pub fn fetch_and(&self, val: usize, order: Ordering) -> usize {
unsafe { atomic_and(self.v.get(), val, order) }
}
#[inline]
pub fn fetch_or(&self, val: uint, order: Ordering) -> uint {
pub fn fetch_or(&self, val: usize, order: Ordering) -> usize {
unsafe { atomic_or(self.v.get(), val, order) }
}
#[inline]
pub fn fetch_xor(&self, val: uint, order: Ordering) -> uint {
pub fn fetch_xor(&self, val: usize, order: Ordering) -> usize {
unsafe { atomic_xor(self.v.get(), val, order) }
}
}

View File

@ -125,7 +125,7 @@ pub fn float_to_str_bytes_common<T: Float, U, F>(
// otherwise as well.
let mut buf = [0; 1536];
let mut end = 0;
let radix_gen: T = cast(radix as int).unwrap();
let radix_gen: T = cast(radix as isize).unwrap();
let (num, exp) = match exp_format {
ExpNone => (num, 0),
@ -235,7 +235,7 @@ pub fn float_to_str_bytes_common<T: Float, U, F>(
let extra_digit = ascii2value(buf[end - 1]);
end -= 1;
if extra_digit >= radix / 2 { // -> need to round
let mut i: int = end as int - 1;
let mut i: isize = end as isize - 1;
loop {
// If reached left end of number, have to
// insert additional digit:

View File

@ -315,7 +315,7 @@ extern "rust-intrinsic" {
/// # #![feature(core)]
/// use std::ptr;
///
/// unsafe fn from_buf_raw<T>(ptr: *const T, elts: uint) -> Vec<T> {
/// unsafe fn from_buf_raw<T>(ptr: *const T, elts: usize) -> Vec<T> {
/// let mut dst = Vec::with_capacity(elts);
/// dst.set_len(elts);
/// ptr::copy(dst.as_mut_ptr(), ptr, elts);

View File

@ -63,7 +63,6 @@
#![allow(raw_pointer_derive)]
#![deny(missing_docs)]
#![feature(int_uint)]
#![feature(intrinsics, lang_items)]
#![feature(on_unimplemented)]
#![feature(simd, unsafe_destructor)]

View File

@ -218,7 +218,7 @@ macro_rules! writeln {
/// Match arms:
///
/// ```
/// fn foo(x: Option<int>) {
/// fn foo(x: Option<i32>) {
/// match x {
/// Some(n) if n >= 0 => println!("Some(Non-negative)"),
/// Some(n) if n < 0 => println!("Some(Negative)"),

View File

@ -193,12 +193,12 @@ impl Float for f32 {
#[inline]
#[unstable(feature = "core")]
#[deprecated(since = "1.0.0")]
fn mantissa_digits(_: Option<f32>) -> uint { MANTISSA_DIGITS as uint }
fn mantissa_digits(_: Option<f32>) -> usize { MANTISSA_DIGITS as usize }
#[inline]
#[unstable(feature = "core")]
#[deprecated(since = "1.0.0")]
fn digits(_: Option<f32>) -> uint { DIGITS as uint }
fn digits(_: Option<f32>) -> usize { DIGITS as usize }
#[inline]
#[unstable(feature = "core")]
@ -208,22 +208,22 @@ impl Float for f32 {
#[inline]
#[unstable(feature = "core")]
#[deprecated(since = "1.0.0")]
fn min_exp(_: Option<f32>) -> int { MIN_EXP as int }
fn min_exp(_: Option<f32>) -> isize { MIN_EXP as isize }
#[inline]
#[unstable(feature = "core")]
#[deprecated(since = "1.0.0")]
fn max_exp(_: Option<f32>) -> int { MAX_EXP as int }
fn max_exp(_: Option<f32>) -> isize { MAX_EXP as isize }
#[inline]
#[unstable(feature = "core")]
#[deprecated(since = "1.0.0")]
fn min_10_exp(_: Option<f32>) -> int { MIN_10_EXP as int }
fn min_10_exp(_: Option<f32>) -> isize { MIN_10_EXP as isize }
#[inline]
#[unstable(feature = "core")]
#[deprecated(since = "1.0.0")]
fn max_10_exp(_: Option<f32>) -> int { MAX_10_EXP as int }
fn max_10_exp(_: Option<f32>) -> isize { MAX_10_EXP as isize }
#[inline]
#[unstable(feature = "core")]

View File

@ -200,12 +200,12 @@ impl Float for f64 {
#[inline]
#[unstable(feature = "core")]
#[deprecated(since = "1.0.0")]
fn mantissa_digits(_: Option<f64>) -> uint { MANTISSA_DIGITS as uint }
fn mantissa_digits(_: Option<f64>) -> usize { MANTISSA_DIGITS as usize }
#[inline]
#[unstable(feature = "core")]
#[deprecated(since = "1.0.0")]
fn digits(_: Option<f64>) -> uint { DIGITS as uint }
fn digits(_: Option<f64>) -> usize { DIGITS as usize }
#[inline]
#[unstable(feature = "core")]
@ -215,22 +215,22 @@ impl Float for f64 {
#[inline]
#[unstable(feature = "core")]
#[deprecated(since = "1.0.0")]
fn min_exp(_: Option<f64>) -> int { MIN_EXP as int }
fn min_exp(_: Option<f64>) -> isize { MIN_EXP as isize }
#[inline]
#[unstable(feature = "core")]
#[deprecated(since = "1.0.0")]
fn max_exp(_: Option<f64>) -> int { MAX_EXP as int }
fn max_exp(_: Option<f64>) -> isize { MAX_EXP as isize }
#[inline]
#[unstable(feature = "core")]
#[deprecated(since = "1.0.0")]
fn min_10_exp(_: Option<f64>) -> int { MIN_10_EXP as int }
fn min_10_exp(_: Option<f64>) -> isize { MIN_10_EXP as isize }
#[inline]
#[unstable(feature = "core")]
#[deprecated(since = "1.0.0")]
fn max_10_exp(_: Option<f64>) -> int { MAX_10_EXP as int }
fn max_10_exp(_: Option<f64>) -> isize { MAX_10_EXP as isize }
#[inline]
#[unstable(feature = "core")]

View File

@ -52,8 +52,8 @@ pub trait Int
+ BitAnd<Output=Self>
+ BitOr<Output=Self>
+ BitXor<Output=Self>
+ Shl<uint, Output=Self>
+ Shr<uint, Output=Self>
+ Shl<usize, Output=Self>
+ Shr<usize, Output=Self>
+ WrappingOps
+ OverflowingOps
{
@ -565,7 +565,7 @@ uint_impl! { u64 = u64, 64,
intrinsics::u64_mul_with_overflow }
#[cfg(target_pointer_width = "32")]
uint_impl! { uint = u32, 32,
uint_impl! { usize = u32, 32,
intrinsics::ctpop32,
intrinsics::ctlz32,
intrinsics::cttz32,
@ -575,7 +575,7 @@ uint_impl! { uint = u32, 32,
intrinsics::u32_mul_with_overflow }
#[cfg(target_pointer_width = "64")]
uint_impl! { uint = u64, 64,
uint_impl! { usize = u64, 64,
intrinsics::ctpop64,
intrinsics::ctlz64,
intrinsics::cttz64,
@ -680,13 +680,13 @@ int_impl! { i64 = i64, u64, 64,
intrinsics::i64_mul_with_overflow }
#[cfg(target_pointer_width = "32")]
int_impl! { int = i32, u32, 32,
int_impl! { isize = i32, u32, 32,
intrinsics::i32_add_with_overflow,
intrinsics::i32_sub_with_overflow,
intrinsics::i32_mul_with_overflow }
#[cfg(target_pointer_width = "64")]
int_impl! { int = i64, u64, 64,
int_impl! { isize = i64, u64, 64,
intrinsics::i64_add_with_overflow,
intrinsics::i64_sub_with_overflow,
intrinsics::i64_mul_with_overflow }
@ -752,7 +752,7 @@ signed_int_impl! { i8 }
signed_int_impl! { i16 }
signed_int_impl! { i32 }
signed_int_impl! { i64 }
signed_int_impl! { int }
signed_int_impl! { isize }
// `Int` + `SignedInt` implemented for signed integers
macro_rules! int_impl {
@ -1232,7 +1232,7 @@ impl i64 {
#[cfg(target_pointer_width = "32")]
#[lang = "isize"]
impl isize {
int_impl! { int = i32, u32, 32,
int_impl! { isize = i32, u32, 32,
intrinsics::i32_add_with_overflow,
intrinsics::i32_sub_with_overflow,
intrinsics::i32_mul_with_overflow }
@ -1241,7 +1241,7 @@ impl isize {
#[cfg(target_pointer_width = "64")]
#[lang = "isize"]
impl isize {
int_impl! { int = i64, u64, 64,
int_impl! { isize = i64, u64, 64,
intrinsics::i64_add_with_overflow,
intrinsics::i64_sub_with_overflow,
intrinsics::i64_mul_with_overflow }
@ -1746,7 +1746,7 @@ impl u64 {
#[cfg(target_pointer_width = "32")]
#[lang = "usize"]
impl usize {
uint_impl! { uint = u32, 32,
uint_impl! { usize = u32, 32,
intrinsics::ctpop32,
intrinsics::ctlz32,
intrinsics::cttz32,
@ -1759,7 +1759,7 @@ impl usize {
#[cfg(target_pointer_width = "64")]
#[lang = "usize"]
impl usize {
uint_impl! { uint = u64, 64,
uint_impl! { usize = u64, 64,
intrinsics::ctpop64,
intrinsics::ctlz64,
intrinsics::cttz64,
@ -1772,11 +1772,11 @@ impl usize {
/// A generic trait for converting a value to a number.
#[unstable(feature = "core", reason = "trait is likely to be removed")]
pub trait ToPrimitive {
/// Converts the value of `self` to an `int`.
/// Converts the value of `self` to an `isize`.
#[inline]
#[unstable(feature = "core")]
#[deprecated(since = "1.0.0", reason = "use to_isize")]
fn to_int(&self) -> Option<int> {
fn to_int(&self) -> Option<isize> {
self.to_i64().and_then(|x| x.to_isize())
}
@ -1807,11 +1807,11 @@ pub trait ToPrimitive {
/// Converts the value of `self` to an `i64`.
fn to_i64(&self) -> Option<i64>;
/// Converts the value of `self` to an `uint`.
/// Converts the value of `self` to an `usize`.
#[inline]
#[unstable(feature = "core")]
#[deprecated(since = "1.0.0", reason = "use to_usize")]
fn to_uint(&self) -> Option<uint> {
fn to_uint(&self) -> Option<usize> {
self.to_u64().and_then(|x| x.to_usize())
}
@ -1893,7 +1893,7 @@ macro_rules! impl_to_primitive_int {
($T:ty) => (
impl ToPrimitive for $T {
#[inline]
fn to_int(&self) -> Option<int> { impl_to_primitive_int_to_int!($T, int, *self) }
fn to_int(&self) -> Option<isize> { impl_to_primitive_int_to_int!($T, isize, *self) }
#[inline]
fn to_isize(&self) -> Option<isize> { impl_to_primitive_int_to_int!($T, isize, *self) }
#[inline]
@ -1906,7 +1906,7 @@ macro_rules! impl_to_primitive_int {
fn to_i64(&self) -> Option<i64> { impl_to_primitive_int_to_int!($T, i64, *self) }
#[inline]
fn to_uint(&self) -> Option<uint> { impl_to_primitive_int_to_uint!($T, uint, *self) }
fn to_uint(&self) -> Option<usize> { impl_to_primitive_int_to_uint!($T, usize, *self) }
#[inline]
fn to_usize(&self) -> Option<usize> { impl_to_primitive_int_to_uint!($T, usize, *self) }
#[inline]
@ -1967,9 +1967,9 @@ macro_rules! impl_to_primitive_uint {
($T:ty) => (
impl ToPrimitive for $T {
#[inline]
fn to_int(&self) -> Option<int> { impl_to_primitive_uint_to_int!(int, *self) }
fn to_int(&self) -> Option<isize> { impl_to_primitive_uint_to_int!(isize, *self) }
#[inline]
fn to_isize(&self) -> Option<int> { impl_to_primitive_uint_to_int!(isize, *self) }
fn to_isize(&self) -> Option<isize> { impl_to_primitive_uint_to_int!(isize, *self) }
#[inline]
fn to_i8(&self) -> Option<i8> { impl_to_primitive_uint_to_int!(i8, *self) }
#[inline]
@ -1980,9 +1980,11 @@ macro_rules! impl_to_primitive_uint {
fn to_i64(&self) -> Option<i64> { impl_to_primitive_uint_to_int!(i64, *self) }
#[inline]
fn to_uint(&self) -> Option<uint> { impl_to_primitive_uint_to_uint!($T, uint, *self) }
fn to_uint(&self) -> Option<usize> { impl_to_primitive_uint_to_uint!($T, usize, *self) }
#[inline]
fn to_usize(&self) -> Option<uint> { impl_to_primitive_uint_to_uint!($T, usize, *self) }
fn to_usize(&self) -> Option<usize> {
impl_to_primitive_uint_to_uint!($T, usize, *self)
}
#[inline]
fn to_u8(&self) -> Option<u8> { impl_to_primitive_uint_to_uint!($T, u8, *self) }
#[inline]
@ -2026,9 +2028,9 @@ macro_rules! impl_to_primitive_float {
($T:ident) => (
impl ToPrimitive for $T {
#[inline]
fn to_int(&self) -> Option<int> { Some(*self as int) }
fn to_int(&self) -> Option<isize> { Some(*self as isize) }
#[inline]
fn to_isize(&self) -> Option<int> { Some(*self as isize) }
fn to_isize(&self) -> Option<isize> { Some(*self as isize) }
#[inline]
fn to_i8(&self) -> Option<i8> { Some(*self as i8) }
#[inline]
@ -2039,9 +2041,9 @@ macro_rules! impl_to_primitive_float {
fn to_i64(&self) -> Option<i64> { Some(*self as i64) }
#[inline]
fn to_uint(&self) -> Option<uint> { Some(*self as uint) }
fn to_uint(&self) -> Option<usize> { Some(*self as usize) }
#[inline]
fn to_usize(&self) -> Option<uint> { Some(*self as usize) }
fn to_usize(&self) -> Option<usize> { Some(*self as usize) }
#[inline]
fn to_u8(&self) -> Option<u8> { Some(*self as u8) }
#[inline]
@ -2065,12 +2067,12 @@ impl_to_primitive_float! { f64 }
/// A generic trait for converting a number to a value.
#[unstable(feature = "core", reason = "trait is likely to be removed")]
pub trait FromPrimitive : ::marker::Sized {
/// Convert an `int` to return an optional value of this type. If the
/// Convert an `isize` to return an optional value of this type. If the
/// value cannot be represented by this value, the `None` is returned.
#[inline]
#[unstable(feature = "core")]
#[deprecated(since = "1.0.0", reason = "use from_isize")]
fn from_int(n: int) -> Option<Self> {
fn from_int(n: isize) -> Option<Self> {
FromPrimitive::from_i64(n as i64)
}
@ -2106,12 +2108,12 @@ pub trait FromPrimitive : ::marker::Sized {
/// type cannot be represented by this value, the `None` is returned.
fn from_i64(n: i64) -> Option<Self>;
/// Convert an `uint` to return an optional value of this type. If the
/// Convert an `usize` to return an optional value of this type. If the
/// type cannot be represented by this value, the `None` is returned.
#[inline]
#[unstable(feature = "core")]
#[deprecated(since = "1.0.0", reason = "use from_usize")]
fn from_uint(n: uint) -> Option<Self> {
fn from_uint(n: usize) -> Option<Self> {
FromPrimitive::from_u64(n as u64)
}
@ -2165,7 +2167,7 @@ pub trait FromPrimitive : ::marker::Sized {
/// A utility function that just calls `FromPrimitive::from_int`.
#[unstable(feature = "core", reason = "likely to be removed")]
#[deprecated(since = "1.0.0", reason = "use from_isize")]
pub fn from_int<A: FromPrimitive>(n: int) -> Option<A> {
pub fn from_int<A: FromPrimitive>(n: isize) -> Option<A> {
FromPrimitive::from_isize(n)
}
@ -2202,7 +2204,7 @@ pub fn from_i64<A: FromPrimitive>(n: i64) -> Option<A> {
/// A utility function that just calls `FromPrimitive::from_uint`.
#[unstable(feature = "core", reason = "likely to be removed")]
#[deprecated(since = "1.0.0", reason = "use from_uint")]
pub fn from_uint<A: FromPrimitive>(n: uint) -> Option<A> {
pub fn from_uint<A: FromPrimitive>(n: usize) -> Option<A> {
FromPrimitive::from_usize(n)
}
@ -2252,13 +2254,13 @@ macro_rules! impl_from_primitive {
($T:ty, $to_ty:ident) => (
#[allow(deprecated)]
impl FromPrimitive for $T {
#[inline] fn from_int(n: int) -> Option<$T> { n.$to_ty() }
#[inline] fn from_int(n: isize) -> Option<$T> { n.$to_ty() }
#[inline] fn from_i8(n: i8) -> Option<$T> { n.$to_ty() }
#[inline] fn from_i16(n: i16) -> Option<$T> { n.$to_ty() }
#[inline] fn from_i32(n: i32) -> Option<$T> { n.$to_ty() }
#[inline] fn from_i64(n: i64) -> Option<$T> { n.$to_ty() }
#[inline] fn from_uint(n: uint) -> Option<$T> { n.$to_ty() }
#[inline] fn from_uint(n: usize) -> Option<$T> { n.$to_ty() }
#[inline] fn from_u8(n: u8) -> Option<$T> { n.$to_ty() }
#[inline] fn from_u16(n: u16) -> Option<$T> { n.$to_ty() }
#[inline] fn from_u32(n: u32) -> Option<$T> { n.$to_ty() }
@ -2270,12 +2272,12 @@ macro_rules! impl_from_primitive {
)
}
impl_from_primitive! { int, to_int }
impl_from_primitive! { isize, to_int }
impl_from_primitive! { i8, to_i8 }
impl_from_primitive! { i16, to_i16 }
impl_from_primitive! { i32, to_i32 }
impl_from_primitive! { i64, to_i64 }
impl_from_primitive! { uint, to_uint }
impl_from_primitive! { usize, to_uint }
impl_from_primitive! { u8, to_u8 }
impl_from_primitive! { u16, to_u16 }
impl_from_primitive! { u32, to_u32 }
@ -2327,12 +2329,12 @@ impl_num_cast! { u8, to_u8 }
impl_num_cast! { u16, to_u16 }
impl_num_cast! { u32, to_u32 }
impl_num_cast! { u64, to_u64 }
impl_num_cast! { uint, to_uint }
impl_num_cast! { usize, to_uint }
impl_num_cast! { i8, to_i8 }
impl_num_cast! { i16, to_i16 }
impl_num_cast! { i32, to_i32 }
impl_num_cast! { i64, to_i64 }
impl_num_cast! { int, to_int }
impl_num_cast! { isize, to_int }
impl_num_cast! { f32, to_f32 }
impl_num_cast! { f64, to_f64 }
@ -2392,12 +2394,12 @@ pub trait Float
#[deprecated(since = "1.0.0",
reason = "use `std::f32::MANTISSA_DIGITS` or \
`std::f64::MANTISSA_DIGITS` as appropriate")]
fn mantissa_digits(unused_self: Option<Self>) -> uint;
fn mantissa_digits(unused_self: Option<Self>) -> usize;
/// Returns the number of base-10 digits of precision that this type supports.
#[unstable(feature = "core")]
#[deprecated(since = "1.0.0",
reason = "use `std::f32::DIGITS` or `std::f64::DIGITS` as appropriate")]
fn digits(unused_self: Option<Self>) -> uint;
fn digits(unused_self: Option<Self>) -> usize;
/// Returns the difference between 1.0 and the smallest representable number larger than 1.0.
#[unstable(feature = "core")]
#[deprecated(since = "1.0.0",
@ -2407,22 +2409,22 @@ pub trait Float
#[unstable(feature = "core")]
#[deprecated(since = "1.0.0",
reason = "use `std::f32::MIN_EXP` or `std::f64::MIN_EXP` as appropriate")]
fn min_exp(unused_self: Option<Self>) -> int;
fn min_exp(unused_self: Option<Self>) -> isize;
/// Returns the maximum binary exponent that this type can represent.
#[unstable(feature = "core")]
#[deprecated(since = "1.0.0",
reason = "use `std::f32::MAX_EXP` or `std::f64::MAX_EXP` as appropriate")]
fn max_exp(unused_self: Option<Self>) -> int;
fn max_exp(unused_self: Option<Self>) -> isize;
/// Returns the minimum base-10 exponent that this type can represent.
#[unstable(feature = "core")]
#[deprecated(since = "1.0.0",
reason = "use `std::f32::MIN_10_EXP` or `std::f64::MIN_10_EXP` as appropriate")]
fn min_10_exp(unused_self: Option<Self>) -> int;
fn min_10_exp(unused_self: Option<Self>) -> isize;
/// Returns the maximum base-10 exponent that this type can represent.
#[unstable(feature = "core")]
#[deprecated(since = "1.0.0",
reason = "use `std::f32::MAX_10_EXP` or `std::f64::MAX_10_EXP` as appropriate")]
fn max_10_exp(unused_self: Option<Self>) -> int;
fn max_10_exp(unused_self: Option<Self>) -> isize;
/// Returns the smallest finite value that this type can represent.
#[unstable(feature = "core")]
#[deprecated(since = "1.0.0",
@ -2625,7 +2627,7 @@ macro_rules! from_str_radix_float_impl {
let mut prev_sig = sig;
let mut cs = src.chars().enumerate();
// Exponent prefix and exponent index offset
let mut exp_info = None::<(char, uint)>;
let mut exp_info = None::<(char, usize)>;
// Parse the integer part of the significand
for (i, c) in cs.by_ref() {
@ -2636,9 +2638,9 @@ macro_rules! from_str_radix_float_impl {
// add/subtract current digit depending on sign
if is_positive {
sig = sig + ((digit as int) as $T);
sig = sig + ((digit as isize) as $T);
} else {
sig = sig - ((digit as int) as $T);
sig = sig - ((digit as isize) as $T);
}
// Detect overflow by comparing to last value, except
@ -2719,9 +2721,9 @@ macro_rules! from_str_radix_float_impl {
// Parse the exponent as decimal integer
let src = &src[offset..];
let (is_positive, exp) = match src.slice_shift_char() {
Some(('-', src)) => (false, src.parse::<uint>()),
Some(('+', src)) => (true, src.parse::<uint>()),
Some((_, _)) => (true, src.parse::<uint>()),
Some(('-', src)) => (false, src.parse::<usize>()),
Some(('+', src)) => (true, src.parse::<usize>()),
Some((_, _)) => (true, src.parse::<usize>()),
None => return Err(PFE { kind: Invalid }),
};

View File

@ -64,7 +64,7 @@ macro_rules! wrapping_impl {
)*)
}
wrapping_impl! { uint u8 u16 u32 u64 int i8 i16 i32 i64 }
wrapping_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 }
#[unstable(feature = "core", reason = "may be removed, renamed, or relocated")]
#[derive(PartialEq,Eq,PartialOrd,Ord,Clone,Copy)]
@ -132,20 +132,20 @@ impl<T:WrappingOps+BitAnd<Output=T>> BitAnd for Wrapping<T> {
}
}
impl<T:WrappingOps+Shl<uint,Output=T>> Shl<uint> for Wrapping<T> {
impl<T:WrappingOps+Shl<usize,Output=T>> Shl<usize> for Wrapping<T> {
type Output = Wrapping<T>;
#[inline(always)]
fn shl(self, other: uint) -> Wrapping<T> {
fn shl(self, other: usize) -> Wrapping<T> {
Wrapping(self.0 << other)
}
}
impl<T:WrappingOps+Shr<uint,Output=T>> Shr<uint> for Wrapping<T> {
impl<T:WrappingOps+Shr<usize,Output=T>> Shr<usize> for Wrapping<T> {
type Output = Wrapping<T>;
#[inline(always)]
fn shr(self, other: uint) -> Wrapping<T> {
fn shr(self, other: usize) -> Wrapping<T> {
Wrapping(self.0 >> other)
}
}

View File

@ -16,7 +16,7 @@
//! interface for panicking is:
//!
//! ```ignore
//! fn panic_impl(fmt: fmt::Arguments, &(&'static str, uint)) -> !;
//! fn panic_impl(fmt: fmt::Arguments, &(&'static str, usize)) -> !;
//! ```
//!
//! This definition allows for panicking with any general message, but it does not
@ -58,8 +58,8 @@ pub fn panic_fmt(fmt: fmt::Arguments, file_line: &(&'static str, u32)) -> ! {
#[allow(improper_ctypes)]
extern {
#[lang = "panic_fmt"]
fn panic_impl(fmt: fmt::Arguments, file: &'static str, line: uint) -> !;
fn panic_impl(fmt: fmt::Arguments, file: &'static str, line: usize) -> !;
}
let (file, line) = *file_line;
unsafe { panic_impl(fmt, file, line as uint) }
unsafe { panic_impl(fmt, file, line as usize) }
}

View File

@ -60,22 +60,22 @@
//! that make working with it more succinct.
//!
//! ```
//! let good_result: Result<int, int> = Ok(10);
//! let bad_result: Result<int, int> = Err(10);
//! let good_result: Result<i32, i32> = Ok(10);
//! let bad_result: Result<i32, i32> = Err(10);
//!
//! // The `is_ok` and `is_err` methods do what they say.
//! assert!(good_result.is_ok() && !good_result.is_err());
//! assert!(bad_result.is_err() && !bad_result.is_ok());
//!
//! // `map` consumes the `Result` and produces another.
//! let good_result: Result<int, int> = good_result.map(|i| i + 1);
//! let bad_result: Result<int, int> = bad_result.map(|i| i - 1);
//! let good_result: Result<i32, i32> = good_result.map(|i| i + 1);
//! let bad_result: Result<i32, i32> = bad_result.map(|i| i - 1);
//!
//! // Use `and_then` to continue the computation.
//! let good_result: Result<bool, int> = good_result.and_then(|i| Ok(i == 11));
//! let good_result: Result<bool, i32> = good_result.and_then(|i| Ok(i == 11));
//!
//! // Use `or_else` to handle the error.
//! let bad_result: Result<int, int> = bad_result.or_else(|i| Ok(11));
//! let bad_result: Result<i32, i32> = bad_result.or_else(|i| Ok(11));
//!
//! // Consume the result and return the contents with `unwrap`.
//! let final_awesome_result = good_result.unwrap();
@ -182,8 +182,8 @@
//!
//! struct Info {
//! name: String,
//! age: int,
//! rating: int
//! age: i32,
//! rating: i32,
//! }
//!
//! fn write_info(info: &Info) -> Result<(), IoError> {
@ -208,8 +208,8 @@
//!
//! struct Info {
//! name: String,
//! age: int,
//! rating: int
//! age: i32,
//! rating: i32,
//! }
//!
//! fn write_info(info: &Info) -> Result<(), IoError> {
@ -282,10 +282,10 @@ impl<T, E> Result<T, E> {
/// # Examples
///
/// ```
/// let x: Result<int, &str> = Ok(-3);
/// let x: Result<i32, &str> = Ok(-3);
/// assert_eq!(x.is_ok(), true);
///
/// let x: Result<int, &str> = Err("Some error message");
/// let x: Result<i32, &str> = Err("Some error message");
/// assert_eq!(x.is_ok(), false);
/// ```
#[inline]
@ -302,10 +302,10 @@ impl<T, E> Result<T, E> {
/// # Examples
///
/// ```
/// let x: Result<int, &str> = Ok(-3);
/// let x: Result<i32, &str> = Ok(-3);
/// assert_eq!(x.is_err(), false);
///
/// let x: Result<int, &str> = Err("Some error message");
/// let x: Result<i32, &str> = Err("Some error message");
/// assert_eq!(x.is_err(), true);
/// ```
#[inline]
@ -392,18 +392,18 @@ impl<T, E> Result<T, E> {
/// Convert from `Result<T, E>` to `Result<&mut T, &mut E>`
///
/// ```
/// fn mutate(r: &mut Result<int, int>) {
/// fn mutate(r: &mut Result<i32, i32>) {
/// match r.as_mut() {
/// Ok(&mut ref mut v) => *v = 42,
/// Err(&mut ref mut e) => *e = 0,
/// }
/// }
///
/// let mut x: Result<int, int> = Ok(2);
/// let mut x: Result<i32, i32> = Ok(2);
/// mutate(&mut x);
/// assert_eq!(x.unwrap(), 42);
///
/// let mut x: Result<int, int> = Err(13);
/// let mut x: Result<i32, i32> = Err(13);
/// mutate(&mut x);
/// assert_eq!(x.unwrap_err(), 0);
/// ```
@ -486,8 +486,8 @@ impl<T, E> Result<T, E> {
/// while !buffer.is_empty() {
/// let line: IoResult<String> = buffer.read_line();
/// // Convert the string line to a number using `map` and `from_str`
/// let val: IoResult<int> = line.map(|line| {
/// line.trim_right().parse::<int>().unwrap_or(0)
/// let val: IoResult<i32> = line.map(|line| {
/// line.trim_right().parse::<i32>().unwrap_or(0)
/// });
/// // Add the value if there were no errors, otherwise add 0
/// sum += val.unwrap_or(0);

View File

@ -1704,7 +1704,7 @@ impl StrExt for str {
#[inline]
unsafe fn slice_unchecked(&self, begin: usize, end: usize) -> &str {
mem::transmute(Slice {
data: self.as_ptr().offset(begin as int),
data: self.as_ptr().offset(begin as isize),
len: end - begin,
})
}

View File

@ -37,9 +37,9 @@ fn any_referenced() {
fn any_owning() {
let (a, b, c) = (box 5_usize as Box<Any>, box TEST as Box<Any>, box Test as Box<Any>);
assert!(a.is::<uint>());
assert!(!b.is::<uint>());
assert!(!c.is::<uint>());
assert!(a.is::<usize>());
assert!(!b.is::<usize>());
assert!(!c.is::<usize>());
assert!(!a.is::<&'static str>());
assert!(b.is::<&'static str>());
@ -54,7 +54,7 @@ fn any_owning() {
fn any_downcast_ref() {
let a = &5_usize as &Any;
match a.downcast_ref::<uint>() {
match a.downcast_ref::<usize>() {
Some(&5) => {}
x => panic!("Unexpected value {:?}", x)
}
@ -71,10 +71,10 @@ fn any_downcast_mut() {
let mut b: Box<_> = box 7_usize;
let a_r = &mut a as &mut Any;
let tmp: &mut uint = &mut *b;
let tmp: &mut usize = &mut *b;
let b_r = tmp as &mut Any;
match a_r.downcast_mut::<uint>() {
match a_r.downcast_mut::<usize>() {
Some(x) => {
assert_eq!(*x, 5);
*x = 612;
@ -82,7 +82,7 @@ fn any_downcast_mut() {
x => panic!("Unexpected value {:?}", x)
}
match b_r.downcast_mut::<uint>() {
match b_r.downcast_mut::<usize>() {
Some(x) => {
assert_eq!(*x, 7);
*x = 413;
@ -100,12 +100,12 @@ fn any_downcast_mut() {
x => panic!("Unexpected value {:?}", x)
}
match a_r.downcast_mut::<uint>() {
match a_r.downcast_mut::<usize>() {
Some(&mut 612) => {}
x => panic!("Unexpected value {:?}", x)
}
match b_r.downcast_mut::<uint>() {
match b_r.downcast_mut::<usize>() {
Some(&mut 413) => {}
x => panic!("Unexpected value {:?}", x)
}
@ -115,8 +115,8 @@ fn any_downcast_mut() {
fn any_fixed_vec() {
let test = [0_usize; 8];
let test = &test as &Any;
assert!(test.is::<[uint; 8]>());
assert!(!test.is::<[uint; 10]>());
assert!(test.is::<[usize; 8]>());
assert!(!test.is::<[usize; 10]>());
}
@ -126,6 +126,6 @@ fn bench_downcast_ref(b: &mut Bencher) {
let mut x = 0;
let mut y = &mut x as &mut Any;
test::black_box(&mut y);
test::black_box(y.downcast_ref::<int>() == Some(&0));
test::black_box(y.downcast_ref::<isize>() == Some(&0));
});
}

View File

@ -134,19 +134,19 @@ fn clone_ref_updates_flag() {
#[test]
fn as_unsafe_cell() {
let c1: Cell<uint> = Cell::new(0);
let c1: Cell<usize> = Cell::new(0);
c1.set(1);
assert_eq!(1, unsafe { *c1.as_unsafe_cell().get() });
let c2: Cell<uint> = Cell::new(0);
let c2: Cell<usize> = Cell::new(0);
unsafe { *c2.as_unsafe_cell().get() = 1; }
assert_eq!(1, c2.get());
let r1: RefCell<uint> = RefCell::new(0);
let r1: RefCell<usize> = RefCell::new(0);
*r1.borrow_mut() = 1;
assert_eq!(1, unsafe { *r1.as_unsafe_cell().get() });
let r2: RefCell<uint> = RefCell::new(0);
let r2: RefCell<usize> = RefCell::new(0);
unsafe { *r2.as_unsafe_cell().get() = 1; }
assert_eq!(1, *r2.borrow());
}

View File

@ -114,7 +114,7 @@ fn test_user_defined_eq() {
// Our type.
struct SketchyNum {
num : int
num : isize
}
// Our implementation of `PartialEq` to support `==` and `!=`.

View File

@ -19,7 +19,7 @@ use test::Bencher;
#[test]
fn test_lt() {
let empty: [int; 0] = [];
let empty: [isize; 0] = [];
let xs = [1,2,3];
let ys = [1,2,0];
@ -73,7 +73,7 @@ fn test_multi_iter() {
#[test]
fn test_counter_from_iter() {
let it = count(0, 5).take(10);
let xs: Vec<int> = FromIterator::from_iter(it);
let xs: Vec<isize> = FromIterator::from_iter(it);
assert_eq!(xs, [0, 5, 10, 15, 20, 25, 30, 35, 40, 45]);
}
@ -104,7 +104,7 @@ fn test_iterator_chain() {
fn test_filter_map() {
let it = count(0, 1).take(10)
.filter_map(|x| if x % 2 == 0 { Some(x*x) } else { None });
assert_eq!(it.collect::<Vec<uint>>(), [0*0, 2*2, 4*4, 6*6, 8*8]);
assert_eq!(it.collect::<Vec<usize>>(), [0*0, 2*2, 4*4, 6*6, 8*8]);
}
#[test]
@ -224,8 +224,8 @@ fn test_iterator_take_short() {
#[test]
fn test_iterator_scan() {
// test the type inference
fn add(old: &mut int, new: &uint) -> Option<f64> {
*old += *new as int;
fn add(old: &mut isize, new: &usize) -> Option<f64> {
*old += *new as isize;
Some(*old as f64)
}
let xs = [0, 1, 2, 3, 4];
@ -261,7 +261,7 @@ fn test_inspect() {
let ys = xs.iter()
.cloned()
.inspect(|_| n += 1)
.collect::<Vec<uint>>();
.collect::<Vec<usize>>();
assert_eq!(n, xs.len());
assert_eq!(&xs[..], &ys[..]);
@ -269,7 +269,7 @@ fn test_inspect() {
#[test]
fn test_unfoldr() {
fn count(st: &mut uint) -> Option<uint> {
fn count(st: &mut usize) -> Option<usize> {
if *st < 10 {
let ret = Some(*st);
*st += 1;
@ -398,14 +398,14 @@ fn test_iterator_size_hint() {
#[test]
fn test_collect() {
let a = vec![1, 2, 3, 4, 5];
let b: Vec<int> = a.iter().cloned().collect();
let b: Vec<isize> = a.iter().cloned().collect();
assert!(a == b);
}
#[test]
fn test_all() {
// FIXME (#22405): Replace `Box::new` with `box` here when/if possible.
let v: Box<[int]> = Box::new([1, 2, 3, 4, 5]);
let v: Box<[isize]> = Box::new([1, 2, 3, 4, 5]);
assert!(v.iter().all(|&x| x < 10));
assert!(!v.iter().all(|&x| x % 2 == 0));
assert!(!v.iter().all(|&x| x > 100));
@ -415,7 +415,7 @@ fn test_all() {
#[test]
fn test_any() {
// FIXME (#22405): Replace `Box::new` with `box` here when/if possible.
let v: Box<[int]> = Box::new([1, 2, 3, 4, 5]);
let v: Box<[isize]> = Box::new([1, 2, 3, 4, 5]);
assert!(v.iter().any(|&x| x < 10));
assert!(v.iter().any(|&x| x % 2 == 0));
assert!(!v.iter().any(|&x| x > 100));
@ -424,7 +424,7 @@ fn test_any() {
#[test]
fn test_find() {
let v: &[int] = &[1, 3, 9, 27, 103, 14, 11];
let v: &[isize] = &[1, 3, 9, 27, 103, 14, 11];
assert_eq!(*v.iter().find(|&&x| x & 1 == 0).unwrap(), 14);
assert_eq!(*v.iter().find(|&&x| x % 3 == 0).unwrap(), 3);
assert!(v.iter().find(|&&x| x % 12 == 0).is_none());
@ -448,13 +448,13 @@ fn test_count() {
#[test]
fn test_max_by() {
let xs: &[int] = &[-3, 0, 1, 5, -10];
let xs: &[isize] = &[-3, 0, 1, 5, -10];
assert_eq!(*xs.iter().max_by(|x| x.abs()).unwrap(), -10);
}
#[test]
fn test_min_by() {
let xs: &[int] = &[-3, 0, 1, 5, -10];
let xs: &[isize] = &[-3, 0, 1, 5, -10];
assert_eq!(*xs.iter().min_by(|x| x.abs()).unwrap(), 0);
}
@ -473,7 +473,7 @@ fn test_rev() {
let mut it = xs.iter();
it.next();
it.next();
assert!(it.rev().cloned().collect::<Vec<int>>() ==
assert!(it.rev().cloned().collect::<Vec<isize>>() ==
vec![16, 14, 12, 10, 8, 6]);
}
@ -572,8 +572,8 @@ fn test_double_ended_chain() {
#[test]
fn test_rposition() {
fn f(xy: &(int, char)) -> bool { let (_x, y) = *xy; y == 'b' }
fn g(xy: &(int, char)) -> bool { let (_x, y) = *xy; y == 'd' }
fn f(xy: &(isize, char)) -> bool { let (_x, y) = *xy; y == 'b' }
fn g(xy: &(isize, char)) -> bool { let (_x, y) = *xy; y == 'd' }
let v = [(0, 'a'), (1, 'b'), (2, 'c'), (3, 'b')];
assert_eq!(v.iter().rposition(f), Some(3));
@ -598,7 +598,7 @@ fn test_rposition_panic() {
#[cfg(test)]
fn check_randacc_iter<A, T>(a: T, len: uint) where
fn check_randacc_iter<A, T>(a: T, len: usize) where
A: PartialEq,
T: Clone + RandomAccessIterator + Iterator<Item=A>,
{
@ -684,7 +684,7 @@ fn test_random_access_zip() {
#[test]
fn test_random_access_take() {
let xs = [1, 2, 3, 4, 5];
let empty: &[int] = &[];
let empty: &[isize] = &[];
check_randacc_iter(xs.iter().take(3), 3);
check_randacc_iter(xs.iter().take(20), xs.len());
check_randacc_iter(xs.iter().take(0), 0);
@ -694,7 +694,7 @@ fn test_random_access_take() {
#[test]
fn test_random_access_skip() {
let xs = [1, 2, 3, 4, 5];
let empty: &[int] = &[];
let empty: &[isize] = &[];
check_randacc_iter(xs.iter().skip(2), xs.len() - 2);
check_randacc_iter(empty.iter().skip(2), 0);
}
@ -726,7 +726,7 @@ fn test_random_access_map() {
#[test]
fn test_random_access_cycle() {
let xs = [1, 2, 3, 4, 5];
let empty: &[int] = &[];
let empty: &[isize] = &[];
check_randacc_iter(xs.iter().cycle().take(27), 27);
check_randacc_iter(empty.iter().cycle(), 0);
}
@ -755,7 +755,7 @@ fn test_range() {
assert_eq!((200..200).rev().count(), 0);
assert_eq!((0..100).size_hint(), (100, Some(100)));
// this test is only meaningful when sizeof uint < sizeof u64
// this test is only meaningful when sizeof usize < sizeof u64
assert_eq!((usize::MAX - 1..usize::MAX).size_hint(), (1, Some(1)));
assert_eq!((-10..-1).size_hint(), (9, Some(9)));
assert_eq!((-1..-10).size_hint(), (0, Some(0)));
@ -763,34 +763,34 @@ fn test_range() {
#[test]
fn test_range_inclusive() {
assert!(range_inclusive(0, 5).collect::<Vec<int>>() ==
assert!(range_inclusive(0, 5).collect::<Vec<isize>>() ==
vec![0, 1, 2, 3, 4, 5]);
assert!(range_inclusive(0, 5).rev().collect::<Vec<int>>() ==
assert!(range_inclusive(0, 5).rev().collect::<Vec<isize>>() ==
vec![5, 4, 3, 2, 1, 0]);
assert_eq!(range_inclusive(200, -5).count(), 0);
assert_eq!(range_inclusive(200, -5).rev().count(), 0);
assert_eq!(range_inclusive(200, 200).collect::<Vec<int>>(), [200]);
assert_eq!(range_inclusive(200, 200).rev().collect::<Vec<int>>(), [200]);
assert_eq!(range_inclusive(200, 200).collect::<Vec<isize>>(), [200]);
assert_eq!(range_inclusive(200, 200).rev().collect::<Vec<isize>>(), [200]);
}
#[test]
fn test_range_step() {
assert_eq!((0..20).step_by(5).collect::<Vec<int>>(), [0, 5, 10, 15]);
assert_eq!((20..0).step_by(-5).collect::<Vec<int>>(), [20, 15, 10, 5]);
assert_eq!((20..0).step_by(-6).collect::<Vec<int>>(), [20, 14, 8, 2]);
assert_eq!((0..20).step_by(5).collect::<Vec<isize>>(), [0, 5, 10, 15]);
assert_eq!((20..0).step_by(-5).collect::<Vec<isize>>(), [20, 15, 10, 5]);
assert_eq!((20..0).step_by(-6).collect::<Vec<isize>>(), [20, 14, 8, 2]);
assert_eq!((200..255).step_by(50).collect::<Vec<u8>>(), [200, 250]);
assert_eq!((200..-5).step_by(1).collect::<Vec<int>>(), []);
assert_eq!((200..200).step_by(1).collect::<Vec<int>>(), []);
assert_eq!((200..-5).step_by(1).collect::<Vec<isize>>(), []);
assert_eq!((200..200).step_by(1).collect::<Vec<isize>>(), []);
}
#[test]
fn test_range_step_inclusive() {
assert_eq!(range_step_inclusive(0, 20, 5).collect::<Vec<int>>(), [0, 5, 10, 15, 20]);
assert_eq!(range_step_inclusive(20, 0, -5).collect::<Vec<int>>(), [20, 15, 10, 5, 0]);
assert_eq!(range_step_inclusive(20, 0, -6).collect::<Vec<int>>(), [20, 14, 8, 2]);
assert_eq!(range_step_inclusive(0, 20, 5).collect::<Vec<isize>>(), [0, 5, 10, 15, 20]);
assert_eq!(range_step_inclusive(20, 0, -5).collect::<Vec<isize>>(), [20, 15, 10, 5, 0]);
assert_eq!(range_step_inclusive(20, 0, -6).collect::<Vec<isize>>(), [20, 14, 8, 2]);
assert_eq!(range_step_inclusive(200, 255, 50).collect::<Vec<u8>>(), [200, 250]);
assert_eq!(range_step_inclusive(200, -5, 1).collect::<Vec<int>>(), []);
assert_eq!(range_step_inclusive(200, 200, 1).collect::<Vec<int>>(), [200]);
assert_eq!(range_step_inclusive(200, -5, 1).collect::<Vec<isize>>(), []);
assert_eq!(range_step_inclusive(200, 200, 1).collect::<Vec<isize>>(), [200]);
}
#[test]
@ -811,7 +811,7 @@ fn test_peekable_is_empty() {
#[test]
fn test_min_max() {
let v: [int; 0] = [];
let v: [isize; 0] = [];
assert_eq!(v.iter().min_max(), NoElements);
let v = [1];
@ -829,7 +829,7 @@ fn test_min_max() {
#[test]
fn test_min_max_result() {
let r: MinMaxResult<int> = NoElements;
let r: MinMaxResult<isize> = NoElements;
assert_eq!(r.into_option(), None);
let r = OneElement(1);
@ -876,7 +876,7 @@ fn test_fuse() {
#[bench]
fn bench_rposition(b: &mut Bencher) {
let it: Vec<uint> = (0..300).collect();
let it: Vec<usize> = (0..300).collect();
b.iter(|| {
it.iter().rposition(|&x| x <= 150);
});

View File

@ -21,15 +21,15 @@ fn size_of_basic() {
#[test]
#[cfg(target_pointer_width = "32")]
fn size_of_32() {
assert_eq!(size_of::<uint>(), 4);
assert_eq!(size_of::<*const uint>(), 4);
assert_eq!(size_of::<usize>(), 4);
assert_eq!(size_of::<*const usize>(), 4);
}
#[test]
#[cfg(target_pointer_width = "64")]
fn size_of_64() {
assert_eq!(size_of::<uint>(), 8);
assert_eq!(size_of::<*const uint>(), 8);
assert_eq!(size_of::<usize>(), 8);
assert_eq!(size_of::<*const usize>(), 8);
}
#[test]
@ -50,15 +50,15 @@ fn align_of_basic() {
#[test]
#[cfg(target_pointer_width = "32")]
fn align_of_32() {
assert_eq!(align_of::<uint>(), 4);
assert_eq!(align_of::<*const uint>(), 4);
assert_eq!(align_of::<usize>(), 4);
assert_eq!(align_of::<*const usize>(), 4);
}
#[test]
#[cfg(target_pointer_width = "64")]
fn align_of_64() {
assert_eq!(align_of::<uint>(), 8);
assert_eq!(align_of::<*const uint>(), 8);
assert_eq!(align_of::<usize>(), 8);
assert_eq!(align_of::<*const usize>(), 8);
}
#[test]
@ -93,12 +93,12 @@ fn test_transmute_copy() {
#[test]
fn test_transmute() {
trait Foo { fn dummy(&self) { } }
impl Foo for int {}
impl Foo for isize {}
let a = box 100isize as Box<Foo>;
unsafe {
let x: ::core::raw::TraitObject = transmute(a);
assert!(*(x.data as *const int) == 100);
assert!(*(x.data as *const isize) == 100);
let _x: Box<Foo> = transmute(x);
}
@ -112,15 +112,15 @@ fn test_transmute() {
// Static/dynamic method dispatch
struct Struct {
field: int
field: isize
}
trait Trait {
fn method(&self) -> int;
fn method(&self) -> isize;
}
impl Trait for Struct {
fn method(&self) -> int {
fn method(&self) -> isize {
self.field
}
}

View File

@ -43,7 +43,7 @@ fn test_match_on_nonzero_option() {
#[test]
fn test_match_option_empty_vec() {
let a: Option<Vec<int>> = Some(vec![]);
let a: Option<Vec<isize>> = Some(vec![]);
match a {
None => panic!("unexpected None while matching on Some(vec![])"),
_ => {}

View File

@ -14,7 +14,7 @@ use core::ops::{Range, RangeFull, RangeFrom, RangeTo};
// Overhead of dtors
struct HasDtor {
_x: int
_x: isize
}
impl Drop for HasDtor {

View File

@ -17,10 +17,10 @@ use core::clone::Clone;
fn test_get_ptr() {
unsafe {
let x: Box<_> = box 0;
let addr_x: *const int = mem::transmute(&*x);
let addr_x: *const isize = mem::transmute(&*x);
let opt = Some(x);
let y = opt.unwrap();
let addr_y: *const int = mem::transmute(&*y);
let addr_y: *const isize = mem::transmute(&*y);
assert_eq!(addr_x, addr_y);
}
}
@ -41,7 +41,7 @@ fn test_get_resource() {
use core::cell::RefCell;
struct R {
i: Rc<RefCell<int>>,
i: Rc<RefCell<isize>>,
}
#[unsafe_destructor]
@ -53,7 +53,7 @@ fn test_get_resource() {
}
}
fn r(i: Rc<RefCell<int>>) -> R {
fn r(i: Rc<RefCell<isize>>) -> R {
R {
i: i
}
@ -89,44 +89,44 @@ fn test_option_too_much_dance() {
#[test]
fn test_and() {
let x: Option<int> = Some(1);
let x: Option<isize> = Some(1);
assert_eq!(x.and(Some(2)), Some(2));
assert_eq!(x.and(None::<int>), None);
assert_eq!(x.and(None::<isize>), None);
let x: Option<int> = None;
let x: Option<isize> = None;
assert_eq!(x.and(Some(2)), None);
assert_eq!(x.and(None::<int>), None);
assert_eq!(x.and(None::<isize>), None);
}
#[test]
fn test_and_then() {
let x: Option<int> = Some(1);
let x: Option<isize> = Some(1);
assert_eq!(x.and_then(|x| Some(x + 1)), Some(2));
assert_eq!(x.and_then(|_| None::<int>), None);
assert_eq!(x.and_then(|_| None::<isize>), None);
let x: Option<int> = None;
let x: Option<isize> = None;
assert_eq!(x.and_then(|x| Some(x + 1)), None);
assert_eq!(x.and_then(|_| None::<int>), None);
assert_eq!(x.and_then(|_| None::<isize>), None);
}
#[test]
fn test_or() {
let x: Option<int> = Some(1);
let x: Option<isize> = Some(1);
assert_eq!(x.or(Some(2)), Some(1));
assert_eq!(x.or(None), Some(1));
let x: Option<int> = None;
let x: Option<isize> = None;
assert_eq!(x.or(Some(2)), Some(2));
assert_eq!(x.or(None), None);
}
#[test]
fn test_or_else() {
let x: Option<int> = Some(1);
let x: Option<isize> = Some(1);
assert_eq!(x.or_else(|| Some(2)), Some(1));
assert_eq!(x.or_else(|| None), Some(1));
let x: Option<int> = None;
let x: Option<isize> = None;
assert_eq!(x.or_else(|| Some(2)), Some(2));
assert_eq!(x.or_else(|| None), None);
}
@ -141,7 +141,7 @@ fn test_unwrap() {
#[test]
#[should_panic]
fn test_unwrap_panic1() {
let x: Option<int> = None;
let x: Option<isize> = None;
x.unwrap();
}
@ -154,19 +154,19 @@ fn test_unwrap_panic2() {
#[test]
fn test_unwrap_or() {
let x: Option<int> = Some(1);
let x: Option<isize> = Some(1);
assert_eq!(x.unwrap_or(2), 1);
let x: Option<int> = None;
let x: Option<isize> = None;
assert_eq!(x.unwrap_or(2), 2);
}
#[test]
fn test_unwrap_or_else() {
let x: Option<int> = Some(1);
let x: Option<isize> = Some(1);
assert_eq!(x.unwrap_or_else(|| 2), 1);
let x: Option<int> = None;
let x: Option<isize> = None;
assert_eq!(x.unwrap_or_else(|| 2), 2);
}
@ -223,13 +223,13 @@ fn test_ord() {
/* FIXME(#20575)
#[test]
fn test_collect() {
let v: Option<Vec<int>> = (0..0).map(|_| Some(0)).collect();
let v: Option<Vec<isize>> = (0..0).map(|_| Some(0)).collect();
assert!(v == Some(vec![]));
let v: Option<Vec<int>> = (0..3).map(|x| Some(x)).collect();
let v: Option<Vec<isize>> = (0..3).map(|x| Some(x)).collect();
assert!(v == Some(vec![0, 1, 2]));
let v: Option<Vec<int>> = (0..3).map(|x| {
let v: Option<Vec<isize>> = (0..3).map(|x| {
if x > 1 { None } else { Some(x) }
}).collect();
assert!(v == None);

View File

@ -16,12 +16,12 @@ use std::iter::repeat;
fn test() {
unsafe {
struct Pair {
fst: int,
snd: int
fst: isize,
snd: isize
};
let mut p = Pair {fst: 10, snd: 20};
let pptr: *mut Pair = &mut p;
let iptr: *mut int = mem::transmute(pptr);
let iptr: *mut isize = mem::transmute(pptr);
assert_eq!(*iptr, 10);
*iptr = 30;
assert_eq!(*iptr, 30);
@ -55,13 +55,13 @@ fn test() {
#[test]
fn test_is_null() {
let p: *const int = null();
let p: *const isize = null();
assert!(p.is_null());
let q = unsafe { p.offset(1) };
assert!(!q.is_null());
let mp: *mut int = null_mut();
let mp: *mut isize = null_mut();
assert!(mp.is_null());
let mq = unsafe { mp.offset(1) };
@ -71,22 +71,22 @@ fn test_is_null() {
#[test]
fn test_as_ref() {
unsafe {
let p: *const int = null();
let p: *const isize = null();
assert_eq!(p.as_ref(), None);
let q: *const int = &2;
let q: *const isize = &2;
assert_eq!(q.as_ref().unwrap(), &2);
let p: *mut int = null_mut();
let p: *mut isize = null_mut();
assert_eq!(p.as_ref(), None);
let q: *mut int = &mut 2;
let q: *mut isize = &mut 2;
assert_eq!(q.as_ref().unwrap(), &2);
// Lifetime inference
let u = 2isize;
{
let p = &u as *const int;
let p = &u as *const isize;
assert_eq!(p.as_ref().unwrap(), &2);
}
}
@ -95,16 +95,16 @@ fn test_as_ref() {
#[test]
fn test_as_mut() {
unsafe {
let p: *mut int = null_mut();
let p: *mut isize = null_mut();
assert!(p.as_mut() == None);
let q: *mut int = &mut 2;
let q: *mut isize = &mut 2;
assert!(q.as_mut().unwrap() == &mut 2);
// Lifetime inference
let mut u = 2isize;
{
let p = &mut u as *mut int;
let p = &mut u as *mut isize;
assert!(p.as_mut().unwrap() == &mut 2);
}
}
@ -143,7 +143,7 @@ fn test_ptr_subtraction() {
let ptr = xs.as_ptr();
while idx >= 0 {
assert_eq!(*(ptr.offset(idx as int)), idx as int);
assert_eq!(*(ptr.offset(idx as isize)), idx as isize);
idx = idx - 1;
}

View File

@ -8,8 +8,8 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub fn op1() -> Result<int, &'static str> { Ok(666) }
pub fn op2() -> Result<int, &'static str> { Err("sadface") }
pub fn op1() -> Result<isize, &'static str> { Ok(666) }
pub fn op2() -> Result<isize, &'static str> { Err("sadface") }
#[test]
pub fn test_and() {
@ -24,13 +24,13 @@ pub fn test_and() {
#[test]
pub fn test_and_then() {
assert_eq!(op1().and_then(|i| Ok::<int, &'static str>(i + 1)).unwrap(), 667);
assert_eq!(op1().and_then(|_| Err::<int, &'static str>("bad")).unwrap_err(),
assert_eq!(op1().and_then(|i| Ok::<isize, &'static str>(i + 1)).unwrap(), 667);
assert_eq!(op1().and_then(|_| Err::<isize, &'static str>("bad")).unwrap_err(),
"bad");
assert_eq!(op2().and_then(|i| Ok::<int, &'static str>(i + 1)).unwrap_err(),
assert_eq!(op2().and_then(|i| Ok::<isize, &'static str>(i + 1)).unwrap_err(),
"sadface");
assert_eq!(op2().and_then(|_| Err::<int, &'static str>("bad")).unwrap_err(),
assert_eq!(op2().and_then(|_| Err::<isize, &'static str>("bad")).unwrap_err(),
"sadface");
}
@ -45,53 +45,53 @@ pub fn test_or() {
#[test]
pub fn test_or_else() {
assert_eq!(op1().or_else(|_| Ok::<int, &'static str>(667)).unwrap(), 666);
assert_eq!(op1().or_else(|e| Err::<int, &'static str>(e)).unwrap(), 666);
assert_eq!(op1().or_else(|_| Ok::<isize, &'static str>(667)).unwrap(), 666);
assert_eq!(op1().or_else(|e| Err::<isize, &'static str>(e)).unwrap(), 666);
assert_eq!(op2().or_else(|_| Ok::<int, &'static str>(667)).unwrap(), 667);
assert_eq!(op2().or_else(|e| Err::<int, &'static str>(e)).unwrap_err(),
assert_eq!(op2().or_else(|_| Ok::<isize, &'static str>(667)).unwrap(), 667);
assert_eq!(op2().or_else(|e| Err::<isize, &'static str>(e)).unwrap_err(),
"sadface");
}
#[test]
pub fn test_impl_map() {
assert!(Ok::<int, int>(1).map(|x| x + 1) == Ok(2));
assert!(Err::<int, int>(1).map(|x| x + 1) == Err(1));
assert!(Ok::<isize, isize>(1).map(|x| x + 1) == Ok(2));
assert!(Err::<isize, isize>(1).map(|x| x + 1) == Err(1));
}
#[test]
pub fn test_impl_map_err() {
assert!(Ok::<int, int>(1).map_err(|x| x + 1) == Ok(1));
assert!(Err::<int, int>(1).map_err(|x| x + 1) == Err(2));
assert!(Ok::<isize, isize>(1).map_err(|x| x + 1) == Ok(1));
assert!(Err::<isize, isize>(1).map_err(|x| x + 1) == Err(2));
}
/* FIXME(#20575)
#[test]
fn test_collect() {
let v: Result<Vec<int>, ()> = (0..0).map(|_| Ok::<int, ()>(0)).collect();
let v: Result<Vec<isize>, ()> = (0..0).map(|_| Ok::<isize, ()>(0)).collect();
assert!(v == Ok(vec![]));
let v: Result<Vec<int>, ()> = (0..3).map(|x| Ok::<int, ()>(x)).collect();
let v: Result<Vec<isize>, ()> = (0..3).map(|x| Ok::<isize, ()>(x)).collect();
assert!(v == Ok(vec![0, 1, 2]));
let v: Result<Vec<int>, int> = (0..3).map(|x| {
let v: Result<Vec<isize>, isize> = (0..3).map(|x| {
if x > 1 { Err(x) } else { Ok(x) }
}).collect();
assert!(v == Err(2));
// test that it does not take more elements than it needs
let mut functions: [Box<Fn() -> Result<(), int>>; 3] =
let mut functions: [Box<Fn() -> Result<(), isize>>; 3] =
[box || Ok(()), box || Err(1), box || panic!()];
let v: Result<Vec<()>, int> = functions.iter_mut().map(|f| (*f)()).collect();
let v: Result<Vec<()>, isize> = functions.iter_mut().map(|f| (*f)()).collect();
assert!(v == Err(1));
}
*/
#[test]
pub fn test_fmt_default() {
let ok: Result<int, &'static str> = Ok(100);
let err: Result<int, &'static str> = Err("Err");
let ok: Result<isize, &'static str> = Ok(100);
let err: Result<isize, &'static str> = Err("Err");
let s = format!("{:?}", ok);
assert_eq!(s, "Ok(100)");
@ -101,8 +101,8 @@ pub fn test_fmt_default() {
#[test]
pub fn test_unwrap_or() {
let ok: Result<int, &'static str> = Ok(100);
let ok_err: Result<int, &'static str> = Err("Err");
let ok: Result<isize, &'static str> = Ok(100);
let ok_err: Result<isize, &'static str> = Err("Err");
assert_eq!(ok.unwrap_or(50), 100);
assert_eq!(ok_err.unwrap_or(50), 50);
@ -110,7 +110,7 @@ pub fn test_unwrap_or() {
#[test]
pub fn test_unwrap_or_else() {
fn handler(msg: &'static str) -> int {
fn handler(msg: &'static str) -> isize {
if msg == "I got this." {
50
} else {
@ -118,8 +118,8 @@ pub fn test_unwrap_or_else() {
}
}
let ok: Result<int, &'static str> = Ok(100);
let ok_err: Result<int, &'static str> = Err("I got this.");
let ok: Result<isize, &'static str> = Ok(100);
let ok_err: Result<isize, &'static str> = Err("I got this.");
assert_eq!(ok.unwrap_or_else(handler), 100);
assert_eq!(ok_err.unwrap_or_else(handler), 50);
@ -128,7 +128,7 @@ pub fn test_unwrap_or_else() {
#[test]
#[should_panic]
pub fn test_unwrap_or_else_panic() {
fn handler(msg: &'static str) -> int {
fn handler(msg: &'static str) -> isize {
if msg == "I got this." {
50
} else {
@ -136,6 +136,6 @@ pub fn test_unwrap_or_else_panic() {
}
}
let bad_err: Result<int, &'static str> = Err("Unrecoverable mess.");
let _ : int = bad_err.unwrap_or_else(handler);
let bad_err: Result<isize, &'static str> = Err("Unrecoverable mess.");
let _ : isize = bad_err.unwrap_or_else(handler);
}

View File

@ -92,7 +92,6 @@
html_playground_url = "http://play.rust-lang.org/")]
#![deny(missing_docs)]
#![feature(int_uint)]
#![feature(staged_api)]
#![feature(str_words)]
#![feature(str_char)]
@ -311,7 +310,7 @@ impl Matches {
}
/// Returns the number of times an option was matched.
pub fn opt_count(&self, nm: &str) -> uint {
pub fn opt_count(&self, nm: &str) -> usize {
self.opt_vals(nm).len()
}
@ -389,7 +388,7 @@ fn is_arg(arg: &str) -> bool {
arg.len() > 1 && arg.as_bytes()[0] == b'-'
}
fn find_opt(opts: &[Opt], nm: Name) -> Option<uint> {
fn find_opt(opts: &[Opt], nm: Name) -> Option<usize> {
// Search main options.
let pos = opts.iter().position(|opt| opt.name == nm);
if pos.is_some() {
@ -587,7 +586,7 @@ pub fn getopts(args: &[String], optgrps: &[OptGroup]) -> Result {
let opts: Vec<Opt> = optgrps.iter().map(|x| x.long_to_short()).collect();
let n_opts = opts.len();
fn f(_x: uint) -> Vec<Optval> { return Vec::new(); }
fn f(_x: usize) -> Vec<Optval> { return Vec::new(); }
let mut vals: Vec<_> = (0..n_opts).map(f).collect();
let mut free: Vec<String> = Vec::new();
@ -873,7 +872,7 @@ enum LengthLimit {
///
/// Panics during iteration if the string contains a non-whitespace
/// sequence longer than the limit.
fn each_split_within<F>(ss: &str, lim: uint, mut it: F) -> bool where
fn each_split_within<F>(ss: &str, lim: usize, mut it: F) -> bool where
F: FnMut(&str) -> bool
{
// Just for fun, let's write this as a state machine:
@ -892,7 +891,7 @@ fn each_split_within<F>(ss: &str, lim: uint, mut it: F) -> bool where
lim = fake_i;
}
let mut machine = |cont: &mut bool, (i, c): (uint, char)| -> bool {
let mut machine = |cont: &mut bool, (i, c): (usize, char)| -> bool {
let whitespace = if c.is_whitespace() { Ws } else { Cr };
let limit = if (i - slice_start + 1) <= lim { UnderLim } else { OverLim };
@ -954,7 +953,7 @@ fn each_split_within<F>(ss: &str, lim: uint, mut it: F) -> bool where
#[test]
fn test_split_within() {
fn t(s: &str, i: uint, u: &[String]) {
fn t(s: &str, i: usize, u: &[String]) {
let mut v = Vec::new();
each_split_within(s, i, |s| { v.push(s.to_string()); true });
assert!(v.iter().zip(u.iter()).all(|(a,b)| a == b));
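As a rough illustration of what `each_split_within` is for, here is a much simpler sketch (the `split_within` helper is hypothetical and skips the state machine and the overlong-word panic): break a string into whitespace-separated pieces that stay within a `usize` limit.

```rust
// Simplified word-wrapping sketch; not the getopts implementation.
fn split_within(ss: &str, lim: usize) -> Vec<String> {
    let mut out = Vec::new();
    let mut cur = String::new();
    for word in ss.split_whitespace() {
        // Flush the current piece once adding the next word would exceed the limit.
        if !cur.is_empty() && cur.len() + 1 + word.len() > lim {
            out.push(std::mem::take(&mut cur));
        }
        if !cur.is_empty() {
            cur.push(' ');
        }
        cur.push_str(word);
    }
    if !cur.is_empty() {
        out.push(cur);
    }
    out
}

fn main() {
    for piece in split_within("Mary had a little lamb", 15) {
        assert!(piece.len() <= 15);
        println!("{}", piece);
    }
}
```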

View File

@ -281,7 +281,6 @@
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/nightly/")]
#![feature(int_uint)]
#![feature(collections)]
#![feature(into_cow)]

View File

@ -172,7 +172,6 @@
#![feature(alloc)]
#![feature(staged_api)]
#![feature(box_syntax)]
#![feature(int_uint)]
#![feature(core)]
#![feature(std_misc)]
@ -246,7 +245,7 @@ pub struct LogLevel(pub u32);
impl fmt::Display for LogLevel {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
let LogLevel(level) = *self;
match LOG_LEVEL_NAMES.get(level as uint - 1) {
match LOG_LEVEL_NAMES.get(level as usize - 1) {
Some(ref name) => fmt::Display::fmt(name, fmt),
None => fmt::Display::fmt(&level, fmt)
}
@ -289,7 +288,7 @@ pub fn log(level: u32, loc: &'static LogLocation, args: fmt::Arguments) {
// is one.
unsafe {
let _g = LOCK.lock();
match FILTER as uint {
match FILTER as usize {
0 => {}
1 => panic!("cannot log after main thread has exited"),
n => {
@ -383,8 +382,8 @@ pub fn mod_enabled(level: u32, module: &str) -> bool {
let _g = LOCK.lock();
unsafe {
assert!(DIRECTIVES as uint != 0);
assert!(DIRECTIVES as uint != 1,
assert!(DIRECTIVES as usize != 0);
assert!(DIRECTIVES as usize != 1,
"cannot log after the main thread has exited");
enabled(level, module, (*DIRECTIVES).iter())

View File

@ -15,9 +15,9 @@ use core::num::Int;
use core::num::wrapping::WrappingOps;
use {Rng, SeedableRng, Rand};
const KEY_WORDS : uint = 8; // 8 words for the 256-bit key
const STATE_WORDS : uint = 16;
const CHACHA_ROUNDS: uint = 20; // Cryptographically secure from 8 upwards as of this writing
const KEY_WORDS : usize = 8; // 8 words for the 256-bit key
const STATE_WORDS : usize = 16;
const CHACHA_ROUNDS: usize = 20; // Cryptographically secure from 8 upwards as of this writing
/// A random number generator that uses the ChaCha20 algorithm [1].
///
@ -32,7 +32,7 @@ const CHACHA_ROUNDS: uint = 20; // Cryptographically secure from 8 upwards as of
pub struct ChaChaRng {
buffer: [u32; STATE_WORDS], // Internal buffer of output
state: [u32; STATE_WORDS], // Initial state
index: uint, // Index into state
index: usize, // Index into state
}
static EMPTY: ChaChaRng = ChaChaRng {

View File

@ -76,7 +76,7 @@ impl<Sup: Rand> IndependentSample<Sup> for RandSample<Sup> {
/// A value with a particular weight for use with `WeightedChoice`.
pub struct Weighted<T> {
/// The numerical weight of this item
pub weight: uint,
pub weight: usize,
/// The actual item which is being weighted
pub item: T,
}
@ -88,7 +88,7 @@ pub struct Weighted<T> {
///
/// The `Clone` restriction is a limitation of the `Sample` and
/// `IndependentSample` traits. Note that `&T` is (cheaply) `Clone` for
/// all `T`, as is `uint`, so one can store references or indices into
/// all `T`, as is `usize`, so one can store references or indices into
/// another vector.
///
/// # Examples
@ -110,7 +110,7 @@ pub struct Weighted<T> {
/// ```
pub struct WeightedChoice<'a, T:'a> {
items: &'a mut [Weighted<T>],
weight_range: Range<uint>
weight_range: Range<usize>
}
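A simplified sketch of the selection idea behind `WeightedChoice` (the `weighted_pick` helper is illustrative only, not the crate's implementation): accumulate the `usize` weights into running totals, then pick the first item whose running total exceeds a uniform draw below the grand total.

```rust
// Illustrative only: `draw` stands in for a uniform sample in [0, total_weight).
fn weighted_pick<'a, T>(items: &'a [(usize, T)], draw: usize) -> &'a T {
    let mut running_total: usize = 0;
    for (weight, item) in items {
        // checked_add mirrors the overflow panic that WeightedChoice::new documents.
        running_total = running_total
            .checked_add(*weight)
            .expect("total weight larger than a usize can contain");
        if draw < running_total {
            return item;
        }
    }
    unreachable!("draw must be below the total weight");
}

fn main() {
    let items = [(2, "common"), (1, "rare")];
    assert_eq!(*weighted_pick(&items, 0), "common"); // draws 0 and 1 hit "common"
    assert_eq!(*weighted_pick(&items, 2), "rare");   // draw 2 hits "rare"
}
```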
impl<'a, T: Clone> WeightedChoice<'a, T> {
@ -119,7 +119,7 @@ impl<'a, T: Clone> WeightedChoice<'a, T> {
/// Panics if:
/// - `v` is empty
/// - the total weight is 0
/// - the total weight is larger than a `uint` can contain.
/// - the total weight is larger than a `usize` can contain.
pub fn new(items: &'a mut [Weighted<T>]) -> WeightedChoice<'a, T> {
// strictly speaking, this is subsumed by the total weight == 0 case
assert!(!items.is_empty(), "WeightedChoice::new called with no items");
@ -133,7 +133,7 @@ impl<'a, T: Clone> WeightedChoice<'a, T> {
running_total = match running_total.checked_add(item.weight) {
Some(n) => n,
None => panic!("WeightedChoice::new called with a total weight \
larger than a uint can contain")
larger than a usize can contain")
};
item.weight = running_total;
@ -238,7 +238,7 @@ fn ziggurat<R: Rng, P, Z>(
// this may be slower than it would be otherwise.)
// FIXME: investigate/optimise for the above.
let bits: u64 = rng.gen();
let i = (bits & 0xff) as uint;
let i = (bits & 0xff) as usize;
let f = (bits >> 11) as f64 / SCALE;
// u is either U(-1, 1) or U(0, 1) depending on if this is a
@ -270,7 +270,7 @@ mod tests {
use super::{RandSample, WeightedChoice, Weighted, Sample, IndependentSample};
#[derive(PartialEq, Debug)]
struct ConstRand(uint);
struct ConstRand(usize);
impl Rand for ConstRand {
fn rand<R: Rng>(_: &mut R) -> ConstRand {
ConstRand(0)
@ -352,7 +352,7 @@ mod tests {
#[test] #[should_panic]
fn test_weighted_choice_no_items() {
WeightedChoice::<int>::new(&mut []);
WeightedChoice::<isize>::new(&mut []);
}
#[test] #[should_panic]
fn test_weighted_choice_zero_weight() {
@ -361,7 +361,7 @@ mod tests {
}
#[test] #[should_panic]
fn test_weighted_choice_weight_overflows() {
let x = (-1) as uint / 2; // x + x + 2 is the overflow
let x = (-1) as usize / 2; // x + x + 2 is the overflow
WeightedChoice::new(&mut [Weighted { weight: x, item: 0 },
Weighted { weight: 1, item: 1 },
Weighted { weight: x, item: 2 },

View File

@ -138,12 +138,12 @@ integer_impl! { i8, u8 }
integer_impl! { i16, u16 }
integer_impl! { i32, u32 }
integer_impl! { i64, u64 }
integer_impl! { int, uint }
integer_impl! { isize, usize }
integer_impl! { u8, u8 }
integer_impl! { u16, u16 }
integer_impl! { u32, u32 }
integer_impl! { u64, u64 }
integer_impl! { uint, uint }
integer_impl! { usize, usize }
macro_rules! float_impl {
($ty:ty) => {
@ -204,8 +204,8 @@ mod tests {
)*
}}
}
t!(i8, i16, i32, i64, int,
u8, u16, u32, u64, uint)
t!(i8, i16, i32, i64, isize,
u8, u16, u32, u64, usize)
}
#[test]

View File

@ -24,7 +24,6 @@
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/nightly/",
html_playground_url = "http://play.rust-lang.org/")]
#![feature(int_uint)]
#![feature(no_std)]
#![no_std]
#![unstable(feature = "rand")]
@ -99,8 +98,8 @@ pub trait Rng : Sized {
/// See `Closed01` for the closed interval `[0,1]`, and
/// `Open01` for the open interval `(0,1)`.
fn next_f32(&mut self) -> f32 {
const MANTISSA_BITS: uint = 24;
const IGNORED_BITS: uint = 8;
const MANTISSA_BITS: usize = 24;
const IGNORED_BITS: usize = 8;
const SCALE: f32 = (1u64 << MANTISSA_BITS) as f32;
// using any more than `MANTISSA_BITS` bits will
@ -121,8 +120,8 @@ pub trait Rng : Sized {
/// See `Closed01` for the closed interval `[0,1]`, and
/// `Open01` for the open interval `(0,1)`.
fn next_f64(&mut self) -> f64 {
const MANTISSA_BITS: uint = 53;
const IGNORED_BITS: uint = 11;
const MANTISSA_BITS: usize = 53;
const IGNORED_BITS: usize = 11;
const SCALE: f64 = (1u64 << MANTISSA_BITS) as f64;
(self.next_u64() >> IGNORED_BITS) as f64 / SCALE
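A minimal sketch of the bit manipulation used above (the input `u64` is a stand-in for a `next_u64()` call; the constants repeat `MANTISSA_BITS = 53` and `IGNORED_BITS = 11`): keep the top 53 bits and scale them into `[0, 1)`.

```rust
fn u64_to_unit_f64(x: u64) -> f64 {
    const MANTISSA_BITS: usize = 53;
    const IGNORED_BITS: usize = 11;
    const SCALE: f64 = (1u64 << MANTISSA_BITS) as f64;
    // Discard the low bits so the result uses at most 53 significant bits.
    (x >> IGNORED_BITS) as f64 / SCALE
}

fn main() {
    assert_eq!(u64_to_unit_f64(0), 0.0);
    assert!(u64_to_unit_f64(u64::MAX) < 1.0); // 1.0 itself is never produced
}
```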
@ -189,7 +188,7 @@ pub trait Rng : Sized {
/// use std::rand::{thread_rng, Rng};
///
/// let mut rng = thread_rng();
/// let x: uint = rng.gen();
/// let x: usize = rng.gen();
/// println!("{}", x);
/// println!("{:?}", rng.gen::<(f64, bool)>());
/// ```
@ -208,7 +207,7 @@ pub trait Rng : Sized {
/// use std::rand::{thread_rng, Rng};
///
/// let mut rng = thread_rng();
/// let x = rng.gen_iter::<uint>().take(10).collect::<Vec<uint>>();
/// let x = rng.gen_iter::<usize>().take(10).collect::<Vec<usize>>();
/// println!("{:?}", x);
/// println!("{:?}", rng.gen_iter::<(f64, bool)>().take(5)
/// .collect::<Vec<(f64, bool)>>());
@ -236,7 +235,7 @@ pub trait Rng : Sized {
/// use std::rand::{thread_rng, Rng};
///
/// let mut rng = thread_rng();
/// let n: uint = rng.gen_range(0, 10);
/// let n: usize = rng.gen_range(0, 10);
/// println!("{}", n);
/// let m: f64 = rng.gen_range(-40.0f64, 1.3e5f64);
/// println!("{}", m);
@ -257,7 +256,7 @@ pub trait Rng : Sized {
/// let mut rng = thread_rng();
/// println!("{}", rng.gen_weighted_bool(3));
/// ```
fn gen_weighted_bool(&mut self, n: uint) -> bool {
fn gen_weighted_bool(&mut self, n: usize) -> bool {
n <= 1 || self.gen_range(0, n) == 0
}

View File

@ -18,14 +18,14 @@ use core::default::Default;
/// How many bytes of entropy the underlying RNG is allowed to generate
/// before it is reseeded.
const DEFAULT_GENERATION_THRESHOLD: uint = 32 * 1024;
const DEFAULT_GENERATION_THRESHOLD: usize = 32 * 1024;
/// A wrapper around any RNG which reseeds the underlying RNG after it
/// has generated a certain number of random bytes.
pub struct ReseedingRng<R, Rsdr> {
rng: R,
generation_threshold: uint,
bytes_generated: uint,
generation_threshold: usize,
bytes_generated: usize,
/// Controls the behaviour when reseeding the RNG.
pub reseeder: Rsdr,
}
@ -38,7 +38,7 @@ impl<R: Rng, Rsdr: Reseeder<R>> ReseedingRng<R, Rsdr> {
/// * `rng`: the random number generator to use.
/// * `generation_threshold`: the number of bytes of entropy at which to reseed the RNG.
/// * `reseeder`: the reseeding object to use.
pub fn new(rng: R, generation_threshold: uint, reseeder: Rsdr) -> ReseedingRng<R,Rsdr> {
pub fn new(rng: R, generation_threshold: usize, reseeder: Rsdr) -> ReseedingRng<R,Rsdr> {
ReseedingRng {
rng: rng,
generation_threshold: generation_threshold,
@ -213,7 +213,7 @@ mod test {
assert_eq!(string1, string2);
}
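The reseeding policy above reduces to a byte counter: once `generation_threshold` bytes have been produced since the last reseed, reseed and reset. A minimal sketch of that bookkeeping (the `ByteBudget` type is hypothetical, not the crate's code):

```rust
struct ByteBudget {
    generation_threshold: usize,
    bytes_generated: usize,
}

impl ByteBudget {
    /// Record `n` freshly generated bytes; returns true when a reseed is due.
    fn note(&mut self, n: usize) -> bool {
        self.bytes_generated += n;
        if self.bytes_generated >= self.generation_threshold {
            self.bytes_generated = 0;
            true
        } else {
            false
        }
    }
}

fn main() {
    let mut budget = ByteBudget { generation_threshold: 32 * 1024, bytes_generated: 0 };
    assert!(!budget.note(16 * 1024)); // halfway to the threshold: keep going
    assert!(budget.note(16 * 1024));  // threshold reached: reseed and reset
    assert_eq!(budget.bytes_generated, 0);
}
```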
const FILL_BYTES_V_LEN: uint = 13579;
const FILL_BYTES_V_LEN: usize = 13579;
#[test]
fn test_rng_fill_bytes() {
let mut v = repeat(0).take(FILL_BYTES_V_LEN).collect::<Vec<_>>();

View File

@ -27,7 +27,7 @@
//! where the tag number is ORed with 0xf000. (E.g. tag 0x123 = `f1 23`)
//!
//! **Lengths** encode the length of the following data.
//! It is a variable-length unsigned int, and one of the following forms:
//! It is a variable-length unsigned integer, and one of the following forms:
//!
//! - `80` through `fe` for lengths up to 0x7e;
//! - `40 ff` through `7f ff` for lengths up to 0x3fff;
@ -125,7 +125,6 @@
#![feature(io)]
#![feature(core)]
#![feature(int_uint)]
#![feature(rustc_private)]
#![feature(staged_api)]
@ -146,8 +145,8 @@ use std::fmt;
#[derive(Clone, Copy)]
pub struct Doc<'a> {
pub data: &'a [u8],
pub start: uint,
pub end: uint,
pub start: usize,
pub end: usize,
}
impl<'doc> Doc<'doc> {
@ -155,7 +154,7 @@ impl<'doc> Doc<'doc> {
Doc { data: data, start: 0, end: data.len() }
}
pub fn get<'a>(&'a self, tag: uint) -> Doc<'a> {
pub fn get<'a>(&'a self, tag: usize) -> Doc<'a> {
reader::get_doc(*self, tag)
}
@ -173,7 +172,7 @@ impl<'doc> Doc<'doc> {
}
pub struct TaggedDoc<'a> {
tag: uint,
tag: usize,
pub doc: Doc<'a>,
}
@ -208,8 +207,8 @@ pub enum EbmlEncoderTag {
EsOpaque = 0x17,
}
const NUM_TAGS: uint = 0x1000;
const NUM_IMPLICIT_TAGS: uint = 0x0e;
const NUM_TAGS: usize = 0x1000;
const NUM_IMPLICIT_TAGS: usize = 0x0e;
static TAG_IMPLICIT_LEN: [i8; NUM_IMPLICIT_TAGS] = [
1, 2, 4, 8, // EsU*
@ -222,8 +221,8 @@ static TAG_IMPLICIT_LEN: [i8; NUM_IMPLICIT_TAGS] = [
#[derive(Debug)]
pub enum Error {
IntTooBig(uint),
InvalidTag(uint),
IntTooBig(usize),
InvalidTag(usize),
Expected(String),
IoError(std::io::Error),
ApplicationError(String)
@ -270,16 +269,16 @@ pub mod reader {
#[derive(Copy)]
pub struct Res {
pub val: uint,
pub next: uint
pub val: usize,
pub next: usize
}
pub fn tag_at(data: &[u8], start: uint) -> DecodeResult<Res> {
let v = data[start] as uint;
pub fn tag_at(data: &[u8], start: usize) -> DecodeResult<Res> {
let v = data[start] as usize;
if v < 0xf0 {
Ok(Res { val: v, next: start + 1 })
} else if v > 0xf0 {
Ok(Res { val: ((v & 0xf) << 8) | data[start + 1] as uint, next: start + 2 })
Ok(Res { val: ((v & 0xf) << 8) | data[start + 1] as usize, next: start + 2 })
} else {
// every tag starting with byte 0xf0 is an overlong form, which is prohibited.
Err(InvalidTag(v))
@ -287,33 +286,33 @@ pub mod reader {
}
#[inline(never)]
fn vuint_at_slow(data: &[u8], start: uint) -> DecodeResult<Res> {
fn vuint_at_slow(data: &[u8], start: usize) -> DecodeResult<Res> {
let a = data[start];
if a & 0x80 != 0 {
return Ok(Res {val: (a & 0x7f) as uint, next: start + 1});
return Ok(Res {val: (a & 0x7f) as usize, next: start + 1});
}
if a & 0x40 != 0 {
return Ok(Res {val: ((a & 0x3f) as uint) << 8 |
(data[start + 1] as uint),
return Ok(Res {val: ((a & 0x3f) as usize) << 8 |
(data[start + 1] as usize),
next: start + 2});
}
if a & 0x20 != 0 {
return Ok(Res {val: ((a & 0x1f) as uint) << 16 |
(data[start + 1] as uint) << 8 |
(data[start + 2] as uint),
return Ok(Res {val: ((a & 0x1f) as usize) << 16 |
(data[start + 1] as usize) << 8 |
(data[start + 2] as usize),
next: start + 3});
}
if a & 0x10 != 0 {
return Ok(Res {val: ((a & 0x0f) as uint) << 24 |
(data[start + 1] as uint) << 16 |
(data[start + 2] as uint) << 8 |
(data[start + 3] as uint),
return Ok(Res {val: ((a & 0x0f) as usize) << 24 |
(data[start + 1] as usize) << 16 |
(data[start + 2] as usize) << 8 |
(data[start + 3] as usize),
next: start + 4});
}
Err(IntTooBig(a as uint))
Err(IntTooBig(a as usize))
}
pub fn vuint_at(data: &[u8], start: uint) -> DecodeResult<Res> {
pub fn vuint_at(data: &[u8], start: usize) -> DecodeResult<Res> {
if data.len() - start < 4 {
return vuint_at_slow(data, start);
}
@ -337,7 +336,7 @@ pub mod reader {
// most significant bit is set etc. we can replace up to three
// "and+branch" with a single table lookup which gives us a measured
// speedup of around 2x on x86_64.
static SHIFT_MASK_TABLE: [(uint, u32); 16] = [
static SHIFT_MASK_TABLE: [(usize, u32); 16] = [
(0, 0x0), (0, 0x0fffffff),
(8, 0x1fffff), (8, 0x1fffff),
(16, 0x3fff), (16, 0x3fff), (16, 0x3fff), (16, 0x3fff),
@ -346,10 +345,10 @@ pub mod reader {
];
unsafe {
let ptr = data.as_ptr().offset(start as int) as *const u32;
let ptr = data.as_ptr().offset(start as isize) as *const u32;
let val = Int::from_be(*ptr);
let i = (val >> 28) as uint;
let i = (val >> 28) as usize;
let (shift, mask) = SHIFT_MASK_TABLE[i];
Ok(Res {
val: ((val >> shift) & mask) as usize,
@ -360,13 +359,13 @@ pub mod reader {
pub fn tag_len_at(data: &[u8], tag: Res) -> DecodeResult<Res> {
if tag.val < NUM_IMPLICIT_TAGS && TAG_IMPLICIT_LEN[tag.val] >= 0 {
Ok(Res { val: TAG_IMPLICIT_LEN[tag.val] as uint, next: tag.next })
Ok(Res { val: TAG_IMPLICIT_LEN[tag.val] as usize, next: tag.next })
} else {
vuint_at(data, tag.next)
}
}
pub fn doc_at<'a>(data: &'a [u8], start: uint) -> DecodeResult<TaggedDoc<'a>> {
pub fn doc_at<'a>(data: &'a [u8], start: usize) -> DecodeResult<TaggedDoc<'a>> {
let elt_tag = try!(tag_at(data, start));
let elt_size = try!(tag_len_at(data, elt_tag));
let end = elt_size.next + elt_size.val;
@ -376,7 +375,7 @@ pub mod reader {
})
}
pub fn maybe_get_doc<'a>(d: Doc<'a>, tg: uint) -> Option<Doc<'a>> {
pub fn maybe_get_doc<'a>(d: Doc<'a>, tg: usize) -> Option<Doc<'a>> {
let mut pos = d.start;
while pos < d.end {
let elt_tag = try_or!(tag_at(d.data, pos), None);
@ -390,7 +389,7 @@ pub mod reader {
None
}
pub fn get_doc<'a>(d: Doc<'a>, tg: uint) -> Doc<'a> {
pub fn get_doc<'a>(d: Doc<'a>, tg: usize) -> Doc<'a> {
match maybe_get_doc(d, tg) {
Some(d) => d,
None => {
@ -401,7 +400,7 @@ pub mod reader {
}
pub fn docs<F>(d: Doc, mut it: F) -> bool where
F: FnMut(uint, Doc) -> bool,
F: FnMut(usize, Doc) -> bool,
{
let mut pos = d.start;
while pos < d.end {
@ -416,7 +415,7 @@ pub mod reader {
return true;
}
pub fn tagged_docs<F>(d: Doc, tg: uint, mut it: F) -> bool where
pub fn tagged_docs<F>(d: Doc, tg: usize, mut it: F) -> bool where
F: FnMut(Doc) -> bool,
{
let mut pos = d.start;
@ -475,7 +474,7 @@ pub mod reader {
pub struct Decoder<'a> {
parent: Doc<'a>,
pos: uint,
pos: usize,
}
impl<'doc> Decoder<'doc> {
@ -501,7 +500,7 @@ pub mod reader {
r_tag,
r_doc.start,
r_doc.end);
if r_tag != (exp_tag as uint) {
if r_tag != (exp_tag as usize) {
return Err(Expected(format!("expected EBML doc with tag {:?} but \
found tag {:?}", exp_tag, r_tag)));
}
@ -528,7 +527,7 @@ pub mod reader {
Ok(r)
}
fn _next_sub(&mut self) -> DecodeResult<uint> {
fn _next_sub(&mut self) -> DecodeResult<usize> {
// empty vector/map optimization
if self.parent.is_empty() {
return Ok(0);
@ -536,10 +535,10 @@ pub mod reader {
let TaggedDoc { tag: r_tag, doc: r_doc } =
try!(doc_at(self.parent.data, self.pos));
let r = if r_tag == (EsSub8 as uint) {
doc_as_u8(r_doc) as uint
} else if r_tag == (EsSub32 as uint) {
doc_as_u32(r_doc) as uint
let r = if r_tag == (EsSub8 as usize) {
doc_as_u8(r_doc) as usize
} else if r_tag == (EsSub32 as usize) {
doc_as_u32(r_doc) as usize
} else {
return Err(Expected(format!("expected EBML doc with tag {:?} or {:?} but \
found tag {:?}", EsSub8, EsSub32, r_tag)));
@ -568,8 +567,8 @@ pub mod reader {
let TaggedDoc { tag: r_tag, doc: r_doc } =
try!(doc_at(self.parent.data, self.pos));
let r = if first_tag as uint <= r_tag && r_tag <= last_tag as uint {
match r_tag - first_tag as uint {
let r = if first_tag as usize <= r_tag && r_tag <= last_tag as usize {
match r_tag - first_tag as usize {
0 => doc_as_u8(r_doc) as u64,
1 => doc_as_u16(r_doc) as u64,
2 => doc_as_u32(r_doc) as u64,
@ -615,12 +614,12 @@ pub mod reader {
fn read_u32(&mut self) -> DecodeResult<u32> { Ok(try!(self._next_int(EsU8, EsU32)) as u32) }
fn read_u16(&mut self) -> DecodeResult<u16> { Ok(try!(self._next_int(EsU8, EsU16)) as u16) }
fn read_u8(&mut self) -> DecodeResult<u8> { Ok(doc_as_u8(try!(self.next_doc(EsU8)))) }
fn read_uint(&mut self) -> DecodeResult<uint> {
fn read_uint(&mut self) -> DecodeResult<usize> {
let v = try!(self._next_int(EsU8, EsU64));
if v > (::std::usize::MAX as u64) {
Err(IntTooBig(v as uint))
Err(IntTooBig(v as usize))
} else {
Ok(v as uint)
Ok(v as usize)
}
}
@ -628,13 +627,13 @@ pub mod reader {
fn read_i32(&mut self) -> DecodeResult<i32> { Ok(try!(self._next_int(EsI8, EsI32)) as i32) }
fn read_i16(&mut self) -> DecodeResult<i16> { Ok(try!(self._next_int(EsI8, EsI16)) as i16) }
fn read_i8(&mut self) -> DecodeResult<i8> { Ok(doc_as_u8(try!(self.next_doc(EsI8))) as i8) }
fn read_int(&mut self) -> DecodeResult<int> {
fn read_int(&mut self) -> DecodeResult<isize> {
let v = try!(self._next_int(EsI8, EsI64)) as i64;
if v > (isize::MAX as i64) || v < (isize::MIN as i64) {
debug!("FIXME \\#6122: Removing this makes this function miscompile");
Err(IntTooBig(v as uint))
Err(IntTooBig(v as usize))
} else {
Ok(v as int)
Ok(v as isize)
}
}
@ -678,7 +677,7 @@ pub mod reader {
fn read_enum_variant<T, F>(&mut self, _: &[&str],
mut f: F) -> DecodeResult<T>
where F: FnMut(&mut Decoder<'doc>, uint) -> DecodeResult<T>,
where F: FnMut(&mut Decoder<'doc>, usize) -> DecodeResult<T>,
{
debug!("read_enum_variant()");
let idx = try!(self._next_sub());
@ -687,7 +686,7 @@ pub mod reader {
f(self, idx)
}
fn read_enum_variant_arg<T, F>(&mut self, idx: uint, f: F) -> DecodeResult<T> where
fn read_enum_variant_arg<T, F>(&mut self, idx: usize, f: F) -> DecodeResult<T> where
F: FnOnce(&mut Decoder<'doc>) -> DecodeResult<T>,
{
debug!("read_enum_variant_arg(idx={})", idx);
@ -696,7 +695,7 @@ pub mod reader {
fn read_enum_struct_variant<T, F>(&mut self, _: &[&str],
mut f: F) -> DecodeResult<T>
where F: FnMut(&mut Decoder<'doc>, uint) -> DecodeResult<T>,
where F: FnMut(&mut Decoder<'doc>, usize) -> DecodeResult<T>,
{
debug!("read_enum_struct_variant()");
let idx = try!(self._next_sub());
@ -707,7 +706,7 @@ pub mod reader {
fn read_enum_struct_variant_field<T, F>(&mut self,
name: &str,
idx: uint,
idx: usize,
f: F)
-> DecodeResult<T> where
F: FnOnce(&mut Decoder<'doc>) -> DecodeResult<T>,
@ -716,21 +715,21 @@ pub mod reader {
f(self)
}
fn read_struct<T, F>(&mut self, name: &str, _: uint, f: F) -> DecodeResult<T> where
fn read_struct<T, F>(&mut self, name: &str, _: usize, f: F) -> DecodeResult<T> where
F: FnOnce(&mut Decoder<'doc>) -> DecodeResult<T>,
{
debug!("read_struct(name={})", name);
f(self)
}
fn read_struct_field<T, F>(&mut self, name: &str, idx: uint, f: F) -> DecodeResult<T> where
fn read_struct_field<T, F>(&mut self, name: &str, idx: usize, f: F) -> DecodeResult<T> where
F: FnOnce(&mut Decoder<'doc>) -> DecodeResult<T>,
{
debug!("read_struct_field(name={}, idx={})", name, idx);
f(self)
}
fn read_tuple<T, F>(&mut self, tuple_len: uint, f: F) -> DecodeResult<T> where
fn read_tuple<T, F>(&mut self, tuple_len: usize, f: F) -> DecodeResult<T> where
F: FnOnce(&mut Decoder<'doc>) -> DecodeResult<T>,
{
debug!("read_tuple()");
@ -744,14 +743,14 @@ pub mod reader {
})
}
fn read_tuple_arg<T, F>(&mut self, idx: uint, f: F) -> DecodeResult<T> where
fn read_tuple_arg<T, F>(&mut self, idx: usize, f: F) -> DecodeResult<T> where
F: FnOnce(&mut Decoder<'doc>) -> DecodeResult<T>,
{
debug!("read_tuple_arg(idx={})", idx);
self.read_seq_elt(idx, f)
}
fn read_tuple_struct<T, F>(&mut self, name: &str, len: uint, f: F) -> DecodeResult<T> where
fn read_tuple_struct<T, F>(&mut self, name: &str, len: usize, f: F) -> DecodeResult<T> where
F: FnOnce(&mut Decoder<'doc>) -> DecodeResult<T>,
{
debug!("read_tuple_struct(name={})", name);
@ -759,7 +758,7 @@ pub mod reader {
}
fn read_tuple_struct_arg<T, F>(&mut self,
idx: uint,
idx: usize,
f: F)
-> DecodeResult<T> where
F: FnOnce(&mut Decoder<'doc>) -> DecodeResult<T>,
@ -786,7 +785,7 @@ pub mod reader {
}
fn read_seq<T, F>(&mut self, f: F) -> DecodeResult<T> where
F: FnOnce(&mut Decoder<'doc>, uint) -> DecodeResult<T>,
F: FnOnce(&mut Decoder<'doc>, usize) -> DecodeResult<T>,
{
debug!("read_seq()");
self.push_doc(EsVec, move |d| {
@ -796,7 +795,7 @@ pub mod reader {
})
}
fn read_seq_elt<T, F>(&mut self, idx: uint, f: F) -> DecodeResult<T> where
fn read_seq_elt<T, F>(&mut self, idx: usize, f: F) -> DecodeResult<T> where
F: FnOnce(&mut Decoder<'doc>) -> DecodeResult<T>,
{
debug!("read_seq_elt(idx={})", idx);
@ -804,7 +803,7 @@ pub mod reader {
}
fn read_map<T, F>(&mut self, f: F) -> DecodeResult<T> where
F: FnOnce(&mut Decoder<'doc>, uint) -> DecodeResult<T>,
F: FnOnce(&mut Decoder<'doc>, usize) -> DecodeResult<T>,
{
debug!("read_map()");
self.push_doc(EsMap, move |d| {
@ -814,14 +813,14 @@ pub mod reader {
})
}
fn read_map_elt_key<T, F>(&mut self, idx: uint, f: F) -> DecodeResult<T> where
fn read_map_elt_key<T, F>(&mut self, idx: usize, f: F) -> DecodeResult<T> where
F: FnOnce(&mut Decoder<'doc>) -> DecodeResult<T>,
{
debug!("read_map_elt_key(idx={})", idx);
self.push_doc(EsMapKey, f)
}
fn read_map_elt_val<T, F>(&mut self, idx: uint, f: F) -> DecodeResult<T> where
fn read_map_elt_val<T, F>(&mut self, idx: usize, f: F) -> DecodeResult<T> where
F: FnOnce(&mut Decoder<'doc>) -> DecodeResult<T>,
{
debug!("read_map_elt_val(idx={})", idx);
@ -859,7 +858,7 @@ pub mod writer {
relax_limit: u64, // do not move encoded bytes before this position
}
fn write_tag<W: Write>(w: &mut W, n: uint) -> EncodeResult {
fn write_tag<W: Write>(w: &mut W, n: usize) -> EncodeResult {
if n < 0xf0 {
w.write_all(&[n as u8])
} else if 0x100 <= n && n < NUM_TAGS {
@ -870,7 +869,7 @@ pub mod writer {
}
}
fn write_sized_vuint<W: Write>(w: &mut W, n: uint, size: uint) -> EncodeResult {
fn write_sized_vuint<W: Write>(w: &mut W, n: usize, size: usize) -> EncodeResult {
match size {
1 => w.write_all(&[0x80 | (n as u8)]),
2 => w.write_all(&[0x40 | ((n >> 8) as u8), n as u8]),
@ -879,16 +878,16 @@ pub mod writer {
4 => w.write_all(&[0x10 | ((n >> 24) as u8), (n >> 16) as u8,
(n >> 8) as u8, n as u8]),
_ => Err(io::Error::new(io::ErrorKind::Other,
"int too big", Some(n.to_string())))
"isize too big", Some(n.to_string())))
}
}
fn write_vuint<W: Write>(w: &mut W, n: uint) -> EncodeResult {
fn write_vuint<W: Write>(w: &mut W, n: usize) -> EncodeResult {
if n < 0x7f { return write_sized_vuint(w, n, 1); }
if n < 0x4000 { return write_sized_vuint(w, n, 2); }
if n < 0x200000 { return write_sized_vuint(w, n, 3); }
if n < 0x10000000 { return write_sized_vuint(w, n, 4); }
Err(io::Error::new(io::ErrorKind::Other, "int too big",
Err(io::Error::new(io::ErrorKind::Other, "usize too big",
Some(n.to_string())))
}
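For reference, a standalone sketch of the length encoding that `write_vuint` above (and the `//! Lengths` note near the top of this file's diff) describes; `encode_vuint` is illustrative only, but its thresholds mirror the ones shown in the hunk.

```rust
// Encode n in the fewest bytes: the high bits of the first byte select the width.
fn encode_vuint(n: usize) -> Vec<u8> {
    if n < 0x7f {
        vec![0x80 | n as u8]
    } else if n < 0x4000 {
        vec![0x40 | (n >> 8) as u8, n as u8]
    } else if n < 0x20_0000 {
        vec![0x20 | (n >> 16) as u8, (n >> 8) as u8, n as u8]
    } else {
        assert!(n < 0x1000_0000, "usize too big for a vuint");
        vec![0x10 | (n >> 24) as u8, (n >> 16) as u8, (n >> 8) as u8, n as u8]
    }
}

fn main() {
    assert_eq!(encode_vuint(0x7e), vec![0xfe]);         // "80 through fe" form
    assert_eq!(encode_vuint(0x1234), vec![0x52, 0x34]); // "40 ff through 7f ff" form
}
```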
@ -910,7 +909,7 @@ pub mod writer {
}
}
pub fn start_tag(&mut self, tag_id: uint) -> EncodeResult {
pub fn start_tag(&mut self, tag_id: usize) -> EncodeResult {
debug!("Start tag {:?}", tag_id);
assert!(tag_id >= NUM_IMPLICIT_TAGS);
@ -932,13 +931,13 @@ pub mod writer {
// relax the size encoding for small tags (bigger tags are costly to move).
// we should never try to move the stable positions, however.
const RELAX_MAX_SIZE: uint = 0x100;
const RELAX_MAX_SIZE: usize = 0x100;
if size <= RELAX_MAX_SIZE && last_size_pos >= self.relax_limit {
// we can't alter the buffer in place, so have a temporary buffer
let mut buf = [0u8; RELAX_MAX_SIZE];
{
let last_size_pos = last_size_pos as usize;
let data = &self.writer.get_ref()[last_size_pos+4..cur_pos as uint];
let data = &self.writer.get_ref()[last_size_pos+4..cur_pos as usize];
bytes::copy_memory(&mut buf, data);
}
@ -955,7 +954,7 @@ pub mod writer {
Ok(())
}
pub fn wr_tag<F>(&mut self, tag_id: uint, blk: F) -> EncodeResult where
pub fn wr_tag<F>(&mut self, tag_id: usize, blk: F) -> EncodeResult where
F: FnOnce() -> EncodeResult,
{
try!(self.start_tag(tag_id));
@ -963,90 +962,90 @@ pub mod writer {
self.end_tag()
}
pub fn wr_tagged_bytes(&mut self, tag_id: uint, b: &[u8]) -> EncodeResult {
pub fn wr_tagged_bytes(&mut self, tag_id: usize, b: &[u8]) -> EncodeResult {
assert!(tag_id >= NUM_IMPLICIT_TAGS);
try!(write_tag(self.writer, tag_id));
try!(write_vuint(self.writer, b.len()));
self.writer.write_all(b)
}
pub fn wr_tagged_u64(&mut self, tag_id: uint, v: u64) -> EncodeResult {
pub fn wr_tagged_u64(&mut self, tag_id: usize, v: u64) -> EncodeResult {
let bytes: [u8; 8] = unsafe { mem::transmute(v.to_be()) };
self.wr_tagged_bytes(tag_id, &bytes)
}
pub fn wr_tagged_u32(&mut self, tag_id: uint, v: u32) -> EncodeResult{
pub fn wr_tagged_u32(&mut self, tag_id: usize, v: u32) -> EncodeResult{
let bytes: [u8; 4] = unsafe { mem::transmute(v.to_be()) };
self.wr_tagged_bytes(tag_id, &bytes)
}
pub fn wr_tagged_u16(&mut self, tag_id: uint, v: u16) -> EncodeResult {
pub fn wr_tagged_u16(&mut self, tag_id: usize, v: u16) -> EncodeResult {
let bytes: [u8; 2] = unsafe { mem::transmute(v.to_be()) };
self.wr_tagged_bytes(tag_id, &bytes)
}
pub fn wr_tagged_u8(&mut self, tag_id: uint, v: u8) -> EncodeResult {
pub fn wr_tagged_u8(&mut self, tag_id: usize, v: u8) -> EncodeResult {
self.wr_tagged_bytes(tag_id, &[v])
}
pub fn wr_tagged_i64(&mut self, tag_id: uint, v: i64) -> EncodeResult {
pub fn wr_tagged_i64(&mut self, tag_id: usize, v: i64) -> EncodeResult {
self.wr_tagged_u64(tag_id, v as u64)
}
pub fn wr_tagged_i32(&mut self, tag_id: uint, v: i32) -> EncodeResult {
pub fn wr_tagged_i32(&mut self, tag_id: usize, v: i32) -> EncodeResult {
self.wr_tagged_u32(tag_id, v as u32)
}
pub fn wr_tagged_i16(&mut self, tag_id: uint, v: i16) -> EncodeResult {
pub fn wr_tagged_i16(&mut self, tag_id: usize, v: i16) -> EncodeResult {
self.wr_tagged_u16(tag_id, v as u16)
}
pub fn wr_tagged_i8(&mut self, tag_id: uint, v: i8) -> EncodeResult {
pub fn wr_tagged_i8(&mut self, tag_id: usize, v: i8) -> EncodeResult {
self.wr_tagged_bytes(tag_id, &[v as u8])
}
pub fn wr_tagged_str(&mut self, tag_id: uint, v: &str) -> EncodeResult {
pub fn wr_tagged_str(&mut self, tag_id: usize, v: &str) -> EncodeResult {
self.wr_tagged_bytes(tag_id, v.as_bytes())
}
// for auto-serialization
fn wr_tagged_raw_bytes(&mut self, tag_id: uint, b: &[u8]) -> EncodeResult {
fn wr_tagged_raw_bytes(&mut self, tag_id: usize, b: &[u8]) -> EncodeResult {
try!(write_tag(self.writer, tag_id));
self.writer.write_all(b)
}
fn wr_tagged_raw_u64(&mut self, tag_id: uint, v: u64) -> EncodeResult {
fn wr_tagged_raw_u64(&mut self, tag_id: usize, v: u64) -> EncodeResult {
let bytes: [u8; 8] = unsafe { mem::transmute(v.to_be()) };
self.wr_tagged_raw_bytes(tag_id, &bytes)
}
fn wr_tagged_raw_u32(&mut self, tag_id: uint, v: u32) -> EncodeResult{
fn wr_tagged_raw_u32(&mut self, tag_id: usize, v: u32) -> EncodeResult{
let bytes: [u8; 4] = unsafe { mem::transmute(v.to_be()) };
self.wr_tagged_raw_bytes(tag_id, &bytes)
}
fn wr_tagged_raw_u16(&mut self, tag_id: uint, v: u16) -> EncodeResult {
fn wr_tagged_raw_u16(&mut self, tag_id: usize, v: u16) -> EncodeResult {
let bytes: [u8; 2] = unsafe { mem::transmute(v.to_be()) };
self.wr_tagged_raw_bytes(tag_id, &bytes)
}
fn wr_tagged_raw_u8(&mut self, tag_id: uint, v: u8) -> EncodeResult {
fn wr_tagged_raw_u8(&mut self, tag_id: usize, v: u8) -> EncodeResult {
self.wr_tagged_raw_bytes(tag_id, &[v])
}
fn wr_tagged_raw_i64(&mut self, tag_id: uint, v: i64) -> EncodeResult {
fn wr_tagged_raw_i64(&mut self, tag_id: usize, v: i64) -> EncodeResult {
self.wr_tagged_raw_u64(tag_id, v as u64)
}
fn wr_tagged_raw_i32(&mut self, tag_id: uint, v: i32) -> EncodeResult {
fn wr_tagged_raw_i32(&mut self, tag_id: usize, v: i32) -> EncodeResult {
self.wr_tagged_raw_u32(tag_id, v as u32)
}
fn wr_tagged_raw_i16(&mut self, tag_id: uint, v: i16) -> EncodeResult {
fn wr_tagged_raw_i16(&mut self, tag_id: usize, v: i16) -> EncodeResult {
self.wr_tagged_raw_u16(tag_id, v as u16)
}
fn wr_tagged_raw_i8(&mut self, tag_id: uint, v: i8) -> EncodeResult {
fn wr_tagged_raw_i8(&mut self, tag_id: usize, v: i8) -> EncodeResult {
self.wr_tagged_raw_bytes(tag_id, &[v as u8])
}
@ -1073,11 +1072,11 @@ pub mod writer {
impl<'a> Encoder<'a> {
// used internally to emit things like the vector length and so on
fn _emit_tagged_sub(&mut self, v: uint) -> EncodeResult {
fn _emit_tagged_sub(&mut self, v: usize) -> EncodeResult {
if let Some(v) = v.to_u8() {
self.wr_tagged_raw_u8(EsSub8 as uint, v)
self.wr_tagged_raw_u8(EsSub8 as usize, v)
} else if let Some(v) = v.to_u32() {
self.wr_tagged_raw_u32(EsSub32 as uint, v)
self.wr_tagged_raw_u32(EsSub32 as usize, v)
} else {
Err(io::Error::new(io::ErrorKind::Other,
"length or variant id too big",
@ -1088,7 +1087,7 @@ pub mod writer {
pub fn emit_opaque<F>(&mut self, f: F) -> EncodeResult where
F: FnOnce(&mut Encoder) -> EncodeResult,
{
try!(self.start_tag(EsOpaque as uint));
try!(self.start_tag(EsOpaque as usize));
try!(f(self));
self.end_tag()
}
@ -1101,88 +1100,88 @@ pub mod writer {
Ok(())
}
fn emit_uint(&mut self, v: uint) -> EncodeResult {
fn emit_uint(&mut self, v: usize) -> EncodeResult {
self.emit_u64(v as u64)
}
fn emit_u64(&mut self, v: u64) -> EncodeResult {
match v.to_u32() {
Some(v) => self.emit_u32(v),
None => self.wr_tagged_raw_u64(EsU64 as uint, v)
None => self.wr_tagged_raw_u64(EsU64 as usize, v)
}
}
fn emit_u32(&mut self, v: u32) -> EncodeResult {
match v.to_u16() {
Some(v) => self.emit_u16(v),
None => self.wr_tagged_raw_u32(EsU32 as uint, v)
None => self.wr_tagged_raw_u32(EsU32 as usize, v)
}
}
fn emit_u16(&mut self, v: u16) -> EncodeResult {
match v.to_u8() {
Some(v) => self.emit_u8(v),
None => self.wr_tagged_raw_u16(EsU16 as uint, v)
None => self.wr_tagged_raw_u16(EsU16 as usize, v)
}
}
fn emit_u8(&mut self, v: u8) -> EncodeResult {
self.wr_tagged_raw_u8(EsU8 as uint, v)
self.wr_tagged_raw_u8(EsU8 as usize, v)
}
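The `emit_u64`/`emit_u32`/`emit_u16` chain above cascades down to the narrowest width that still fits the value. A rough sketch of that selection (illustrative only; the real code uses the `to_u32`/`to_u16`/`to_u8` conversions shown in the hunks):

```rust
// Width, in bytes, of the smallest unsigned form that can hold v.
fn narrowest_width(v: u64) -> usize {
    if v <= u8::MAX as u64 {
        1
    } else if v <= u16::MAX as u64 {
        2
    } else if v <= u32::MAX as u64 {
        4
    } else {
        8
    }
}

fn main() {
    assert_eq!(narrowest_width(0xff), 1);
    assert_eq!(narrowest_width(0x100), 2);
    assert_eq!(narrowest_width(1 << 40), 8);
}
```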
fn emit_int(&mut self, v: int) -> EncodeResult {
fn emit_int(&mut self, v: isize) -> EncodeResult {
self.emit_i64(v as i64)
}
fn emit_i64(&mut self, v: i64) -> EncodeResult {
match v.to_i32() {
Some(v) => self.emit_i32(v),
None => self.wr_tagged_raw_i64(EsI64 as uint, v)
None => self.wr_tagged_raw_i64(EsI64 as usize, v)
}
}
fn emit_i32(&mut self, v: i32) -> EncodeResult {
match v.to_i16() {
Some(v) => self.emit_i16(v),
None => self.wr_tagged_raw_i32(EsI32 as uint, v)
None => self.wr_tagged_raw_i32(EsI32 as usize, v)
}
}
fn emit_i16(&mut self, v: i16) -> EncodeResult {
match v.to_i8() {
Some(v) => self.emit_i8(v),
None => self.wr_tagged_raw_i16(EsI16 as uint, v)
None => self.wr_tagged_raw_i16(EsI16 as usize, v)
}
}
fn emit_i8(&mut self, v: i8) -> EncodeResult {
self.wr_tagged_raw_i8(EsI8 as uint, v)
self.wr_tagged_raw_i8(EsI8 as usize, v)
}
fn emit_bool(&mut self, v: bool) -> EncodeResult {
self.wr_tagged_raw_u8(EsBool as uint, v as u8)
self.wr_tagged_raw_u8(EsBool as usize, v as u8)
}
fn emit_f64(&mut self, v: f64) -> EncodeResult {
let bits = unsafe { mem::transmute(v) };
self.wr_tagged_raw_u64(EsF64 as uint, bits)
self.wr_tagged_raw_u64(EsF64 as usize, bits)
}
fn emit_f32(&mut self, v: f32) -> EncodeResult {
let bits = unsafe { mem::transmute(v) };
self.wr_tagged_raw_u32(EsF32 as uint, bits)
self.wr_tagged_raw_u32(EsF32 as usize, bits)
}
fn emit_char(&mut self, v: char) -> EncodeResult {
self.wr_tagged_raw_u32(EsChar as uint, v as u32)
self.wr_tagged_raw_u32(EsChar as usize, v as u32)
}
fn emit_str(&mut self, v: &str) -> EncodeResult {
self.wr_tagged_str(EsStr as uint, v)
self.wr_tagged_str(EsStr as usize, v)
}
fn emit_enum<F>(&mut self, _name: &str, f: F) -> EncodeResult where
F: FnOnce(&mut Encoder<'a>) -> EncodeResult,
{
try!(self.start_tag(EsEnum as uint));
try!(self.start_tag(EsEnum as usize));
try!(f(self));
self.end_tag()
}
fn emit_enum_variant<F>(&mut self,
_: &str,
v_id: uint,
_: uint,
v_id: usize,
_: usize,
f: F) -> EncodeResult where
F: FnOnce(&mut Encoder<'a>) -> EncodeResult,
{
@ -1190,7 +1189,7 @@ pub mod writer {
f(self)
}
fn emit_enum_variant_arg<F>(&mut self, _: uint, f: F) -> EncodeResult where
fn emit_enum_variant_arg<F>(&mut self, _: usize, f: F) -> EncodeResult where
F: FnOnce(&mut Encoder<'a>) -> EncodeResult,
{
f(self)
@ -1198,8 +1197,8 @@ pub mod writer {
fn emit_enum_struct_variant<F>(&mut self,
v_name: &str,
v_id: uint,
cnt: uint,
v_id: usize,
cnt: usize,
f: F) -> EncodeResult where
F: FnOnce(&mut Encoder<'a>) -> EncodeResult,
{
@ -1208,42 +1207,42 @@ pub mod writer {
fn emit_enum_struct_variant_field<F>(&mut self,
_: &str,
idx: uint,
idx: usize,
f: F) -> EncodeResult where
F: FnOnce(&mut Encoder<'a>) -> EncodeResult,
{
self.emit_enum_variant_arg(idx, f)
}
fn emit_struct<F>(&mut self, _: &str, _len: uint, f: F) -> EncodeResult where
fn emit_struct<F>(&mut self, _: &str, _len: usize, f: F) -> EncodeResult where
F: FnOnce(&mut Encoder<'a>) -> EncodeResult,
{
f(self)
}
fn emit_struct_field<F>(&mut self, _name: &str, _: uint, f: F) -> EncodeResult where
fn emit_struct_field<F>(&mut self, _name: &str, _: usize, f: F) -> EncodeResult where
F: FnOnce(&mut Encoder<'a>) -> EncodeResult,
{
f(self)
}
fn emit_tuple<F>(&mut self, len: uint, f: F) -> EncodeResult where
fn emit_tuple<F>(&mut self, len: usize, f: F) -> EncodeResult where
F: FnOnce(&mut Encoder<'a>) -> EncodeResult,
{
self.emit_seq(len, f)
}
fn emit_tuple_arg<F>(&mut self, idx: uint, f: F) -> EncodeResult where
fn emit_tuple_arg<F>(&mut self, idx: usize, f: F) -> EncodeResult where
F: FnOnce(&mut Encoder<'a>) -> EncodeResult,
{
self.emit_seq_elt(idx, f)
}
fn emit_tuple_struct<F>(&mut self, _: &str, len: uint, f: F) -> EncodeResult where
fn emit_tuple_struct<F>(&mut self, _: &str, len: usize, f: F) -> EncodeResult where
F: FnOnce(&mut Encoder<'a>) -> EncodeResult,
{
self.emit_seq(len, f)
}
fn emit_tuple_struct_arg<F>(&mut self, idx: uint, f: F) -> EncodeResult where
fn emit_tuple_struct_arg<F>(&mut self, idx: usize, f: F) -> EncodeResult where
F: FnOnce(&mut Encoder<'a>) -> EncodeResult,
{
self.emit_seq_elt(idx, f)
@ -1264,56 +1263,56 @@ pub mod writer {
self.emit_enum_variant("Some", 1, 1, f)
}
fn emit_seq<F>(&mut self, len: uint, f: F) -> EncodeResult where
fn emit_seq<F>(&mut self, len: usize, f: F) -> EncodeResult where
F: FnOnce(&mut Encoder<'a>) -> EncodeResult,
{
if len == 0 {
// empty vector optimization
return self.wr_tagged_bytes(EsVec as uint, &[]);
return self.wr_tagged_bytes(EsVec as usize, &[]);
}
try!(self.start_tag(EsVec as uint));
try!(self.start_tag(EsVec as usize));
try!(self._emit_tagged_sub(len));
try!(f(self));
self.end_tag()
}
fn emit_seq_elt<F>(&mut self, _idx: uint, f: F) -> EncodeResult where
fn emit_seq_elt<F>(&mut self, _idx: usize, f: F) -> EncodeResult where
F: FnOnce(&mut Encoder<'a>) -> EncodeResult,
{
try!(self.start_tag(EsVecElt as uint));
try!(self.start_tag(EsVecElt as usize));
try!(f(self));
self.end_tag()
}
fn emit_map<F>(&mut self, len: uint, f: F) -> EncodeResult where
fn emit_map<F>(&mut self, len: usize, f: F) -> EncodeResult where
F: FnOnce(&mut Encoder<'a>) -> EncodeResult,
{
if len == 0 {
// empty map optimization
return self.wr_tagged_bytes(EsMap as uint, &[]);
return self.wr_tagged_bytes(EsMap as usize, &[]);
}
try!(self.start_tag(EsMap as uint));
try!(self.start_tag(EsMap as usize));
try!(self._emit_tagged_sub(len));
try!(f(self));
self.end_tag()
}
fn emit_map_elt_key<F>(&mut self, _idx: uint, f: F) -> EncodeResult where
fn emit_map_elt_key<F>(&mut self, _idx: usize, f: F) -> EncodeResult where
F: FnOnce(&mut Encoder<'a>) -> EncodeResult,
{
try!(self.start_tag(EsMapKey as uint));
try!(self.start_tag(EsMapKey as usize));
try!(f(self));
self.end_tag()
}
fn emit_map_elt_val<F>(&mut self, _idx: uint, f: F) -> EncodeResult where
fn emit_map_elt_val<F>(&mut self, _idx: usize, f: F) -> EncodeResult where
F: FnOnce(&mut Encoder<'a>) -> EncodeResult,
{
try!(self.start_tag(EsMapVal as uint));
try!(self.start_tag(EsMapVal as usize));
try!(f(self));
self.end_tag()
}
@ -1381,7 +1380,7 @@ mod tests {
#[test]
fn test_option_int() {
fn test_v(v: Option<int>) {
fn test_v(v: Option<isize>) {
debug!("v == {:?}", v);
let mut wr = Cursor::new(Vec::new());
{

View File

@ -30,7 +30,6 @@
#![feature(collections)]
#![feature(core)]
#![feature(hash)]
#![feature(int_uint)]
#![feature(libc)]
#![feature(old_path)]
#![feature(quote)]

View File

@ -21,82 +21,82 @@ use back::svh::Svh;
// 0xf0..0xff: internally used by RBML to encode 0x100..0xfff in two bytes
// 0x100..0xfff: free for use, preferred for infrequent tags
pub const tag_items: uint = 0x100; // top-level only
pub const tag_items: usize = 0x100; // top-level only
pub const tag_paths_data_name: uint = 0x20;
pub const tag_paths_data_name: usize = 0x20;
pub const tag_def_id: uint = 0x21;
pub const tag_def_id: usize = 0x21;
pub const tag_items_data: uint = 0x22;
pub const tag_items_data: usize = 0x22;
pub const tag_items_data_item: uint = 0x23;
pub const tag_items_data_item: usize = 0x23;
pub const tag_items_data_item_family: uint = 0x24;
pub const tag_items_data_item_family: usize = 0x24;
pub const tag_items_data_item_type: uint = 0x25;
pub const tag_items_data_item_type: usize = 0x25;
pub const tag_items_data_item_symbol: uint = 0x26;
pub const tag_items_data_item_symbol: usize = 0x26;
pub const tag_items_data_item_variant: uint = 0x27;
pub const tag_items_data_item_variant: usize = 0x27;
pub const tag_items_data_parent_item: uint = 0x28;
pub const tag_items_data_parent_item: usize = 0x28;
pub const tag_items_data_item_is_tuple_struct_ctor: uint = 0x29;
pub const tag_items_data_item_is_tuple_struct_ctor: usize = 0x29;
pub const tag_index: uint = 0x2a;
pub const tag_index: usize = 0x2a;
pub const tag_index_buckets: uint = 0x2b;
pub const tag_index_buckets: usize = 0x2b;
pub const tag_index_buckets_bucket: uint = 0x2c;
pub const tag_index_buckets_bucket: usize = 0x2c;
pub const tag_index_buckets_bucket_elt: uint = 0x2d;
pub const tag_index_buckets_bucket_elt: usize = 0x2d;
pub const tag_index_table: uint = 0x2e;
pub const tag_index_table: usize = 0x2e;
pub const tag_meta_item_name_value: uint = 0x2f;
pub const tag_meta_item_name_value: usize = 0x2f;
pub const tag_meta_item_name: uint = 0x30;
pub const tag_meta_item_name: usize = 0x30;
pub const tag_meta_item_value: uint = 0x31;
pub const tag_meta_item_value: usize = 0x31;
pub const tag_attributes: uint = 0x101; // top-level only
pub const tag_attributes: usize = 0x101; // top-level only
pub const tag_attribute: uint = 0x32;
pub const tag_attribute: usize = 0x32;
pub const tag_meta_item_word: uint = 0x33;
pub const tag_meta_item_word: usize = 0x33;
pub const tag_meta_item_list: uint = 0x34;
pub const tag_meta_item_list: usize = 0x34;
// The list of crates that this crate depends on
pub const tag_crate_deps: uint = 0x102; // top-level only
pub const tag_crate_deps: usize = 0x102; // top-level only
// A single crate dependency
pub const tag_crate_dep: uint = 0x35;
pub const tag_crate_dep: usize = 0x35;
pub const tag_crate_hash: uint = 0x103; // top-level only
pub const tag_crate_crate_name: uint = 0x104; // top-level only
pub const tag_crate_hash: usize = 0x103; // top-level only
pub const tag_crate_crate_name: usize = 0x104; // top-level only
pub const tag_crate_dep_crate_name: uint = 0x36;
pub const tag_crate_dep_hash: uint = 0x37;
pub const tag_crate_dep_crate_name: usize = 0x36;
pub const tag_crate_dep_hash: usize = 0x37;
pub const tag_mod_impl: uint = 0x38;
pub const tag_mod_impl: usize = 0x38;
pub const tag_item_trait_item: uint = 0x39;
pub const tag_item_trait_item: usize = 0x39;
pub const tag_item_trait_ref: uint = 0x3a;
pub const tag_item_trait_ref: usize = 0x3a;
// discriminator value for variants
pub const tag_disr_val: uint = 0x3c;
pub const tag_disr_val: usize = 0x3c;
// used to encode ast_map::PathElem
pub const tag_path: uint = 0x3d;
pub const tag_path_len: uint = 0x3e;
pub const tag_path_elem_mod: uint = 0x3f;
pub const tag_path_elem_name: uint = 0x40;
pub const tag_item_field: uint = 0x41;
pub const tag_item_field_origin: uint = 0x42;
pub const tag_path: usize = 0x3d;
pub const tag_path_len: usize = 0x3e;
pub const tag_path_elem_mod: usize = 0x3f;
pub const tag_path_elem_name: usize = 0x40;
pub const tag_item_field: usize = 0x41;
pub const tag_item_field_origin: usize = 0x42;
pub const tag_item_variances: uint = 0x43;
pub const tag_item_variances: usize = 0x43;
/*
trait items contain tag_item_trait_item elements,
impl items contain tag_item_impl_item elements, and classes
@ -105,19 +105,19 @@ pub const tag_item_variances: uint = 0x43;
both, tag_item_trait_item and tag_item_impl_item have to be two
different tags.
*/
pub const tag_item_impl_item: uint = 0x44;
pub const tag_item_trait_method_explicit_self: uint = 0x45;
pub const tag_item_impl_item: usize = 0x44;
pub const tag_item_trait_method_explicit_self: usize = 0x45;
// Reexports are found within module tags. Each reexport contains def_ids
// and names.
pub const tag_items_data_item_reexport: uint = 0x46;
pub const tag_items_data_item_reexport_def_id: uint = 0x47;
pub const tag_items_data_item_reexport_name: uint = 0x48;
pub const tag_items_data_item_reexport: usize = 0x46;
pub const tag_items_data_item_reexport_def_id: usize = 0x47;
pub const tag_items_data_item_reexport_name: usize = 0x48;
// used to encode crate_ctxt side tables
#[derive(Copy, PartialEq, FromPrimitive)]
#[repr(uint)]
#[repr(usize)]
pub enum astencode_tag { // Reserves 0x50 -- 0x6f
tag_ast = 0x50,
@ -149,15 +149,15 @@ pub enum astencode_tag { // Reserves 0x50 -- 0x6f
tag_table_const_qualif = 0x69,
}
pub const tag_item_trait_item_sort: uint = 0x70;
pub const tag_item_trait_item_sort: usize = 0x70;
pub const tag_item_trait_parent_sort: uint = 0x71;
pub const tag_item_trait_parent_sort: usize = 0x71;
pub const tag_item_impl_type_basename: uint = 0x72;
pub const tag_item_impl_type_basename: usize = 0x72;
pub const tag_crate_triple: uint = 0x105; // top-level only
pub const tag_crate_triple: usize = 0x105; // top-level only
pub const tag_dylib_dependency_formats: uint = 0x106; // top-level only
pub const tag_dylib_dependency_formats: usize = 0x106; // top-level only
// Language items are a top-level directory (for speed). Hierarchy:
//
@ -166,47 +166,47 @@ pub const tag_dylib_dependency_formats: uint = 0x106; // top-level only
// - tag_lang_items_item_id: u32
// - tag_lang_items_item_node_id: u32
pub const tag_lang_items: uint = 0x107; // top-level only
pub const tag_lang_items_item: uint = 0x73;
pub const tag_lang_items_item_id: uint = 0x74;
pub const tag_lang_items_item_node_id: uint = 0x75;
pub const tag_lang_items_missing: uint = 0x76;
pub const tag_lang_items: usize = 0x107; // top-level only
pub const tag_lang_items_item: usize = 0x73;
pub const tag_lang_items_item_id: usize = 0x74;
pub const tag_lang_items_item_node_id: usize = 0x75;
pub const tag_lang_items_missing: usize = 0x76;
pub const tag_item_unnamed_field: uint = 0x77;
pub const tag_items_data_item_visibility: uint = 0x78;
pub const tag_item_unnamed_field: usize = 0x77;
pub const tag_items_data_item_visibility: usize = 0x78;
pub const tag_item_method_tps: uint = 0x79;
pub const tag_item_method_fty: uint = 0x7a;
pub const tag_item_method_tps: usize = 0x79;
pub const tag_item_method_fty: usize = 0x7a;
pub const tag_mod_child: uint = 0x7b;
pub const tag_misc_info: uint = 0x108; // top-level only
pub const tag_misc_info_crate_items: uint = 0x7c;
pub const tag_mod_child: usize = 0x7b;
pub const tag_misc_info: usize = 0x108; // top-level only
pub const tag_misc_info_crate_items: usize = 0x7c;
pub const tag_item_method_provided_source: uint = 0x7d;
pub const tag_item_impl_vtables: uint = 0x7e;
pub const tag_item_method_provided_source: usize = 0x7d;
pub const tag_item_impl_vtables: usize = 0x7e;
pub const tag_impls: uint = 0x109; // top-level only
pub const tag_impls_impl: uint = 0x7f;
pub const tag_impls: usize = 0x109; // top-level only
pub const tag_impls_impl: usize = 0x7f;
pub const tag_items_data_item_inherent_impl: uint = 0x80;
pub const tag_items_data_item_extension_impl: uint = 0x81;
pub const tag_items_data_item_inherent_impl: usize = 0x80;
pub const tag_items_data_item_extension_impl: usize = 0x81;
pub const tag_native_libraries: uint = 0x10a; // top-level only
pub const tag_native_libraries_lib: uint = 0x82;
pub const tag_native_libraries_name: uint = 0x83;
pub const tag_native_libraries_kind: uint = 0x84;
pub const tag_native_libraries: usize = 0x10a; // top-level only
pub const tag_native_libraries_lib: usize = 0x82;
pub const tag_native_libraries_name: usize = 0x83;
pub const tag_native_libraries_kind: usize = 0x84;
pub const tag_plugin_registrar_fn: uint = 0x10b; // top-level only
pub const tag_plugin_registrar_fn: usize = 0x10b; // top-level only
pub const tag_method_argument_names: uint = 0x85;
pub const tag_method_argument_name: uint = 0x86;
pub const tag_method_argument_names: usize = 0x85;
pub const tag_method_argument_name: usize = 0x86;
pub const tag_reachable_extern_fns: uint = 0x10c; // top-level only
pub const tag_reachable_extern_fn_id: uint = 0x87;
pub const tag_reachable_extern_fns: usize = 0x10c; // top-level only
pub const tag_reachable_extern_fn_id: usize = 0x87;
pub const tag_items_data_item_stability: uint = 0x88;
pub const tag_items_data_item_stability: usize = 0x88;
pub const tag_items_data_item_repr: uint = 0x89;
pub const tag_items_data_item_repr: usize = 0x89;
#[derive(Clone, Debug)]
pub struct LinkMeta {
@ -214,45 +214,45 @@ pub struct LinkMeta {
pub crate_hash: Svh,
}
pub const tag_struct_fields: uint = 0x10d; // top-level only
pub const tag_struct_field: uint = 0x8a;
pub const tag_struct_field_id: uint = 0x8b;
pub const tag_struct_fields: usize = 0x10d; // top-level only
pub const tag_struct_field: usize = 0x8a;
pub const tag_struct_field_id: usize = 0x8b;
pub const tag_attribute_is_sugared_doc: uint = 0x8c;
pub const tag_attribute_is_sugared_doc: usize = 0x8c;
pub const tag_items_data_region: uint = 0x8e;
pub const tag_items_data_region: usize = 0x8e;
pub const tag_region_param_def: uint = 0x8f;
pub const tag_region_param_def_ident: uint = 0x90;
pub const tag_region_param_def_def_id: uint = 0x91;
pub const tag_region_param_def_space: uint = 0x92;
pub const tag_region_param_def_index: uint = 0x93;
pub const tag_region_param_def: usize = 0x8f;
pub const tag_region_param_def_ident: usize = 0x90;
pub const tag_region_param_def_def_id: usize = 0x91;
pub const tag_region_param_def_space: usize = 0x92;
pub const tag_region_param_def_index: usize = 0x93;
pub const tag_type_param_def: uint = 0x94;
pub const tag_type_param_def: usize = 0x94;
pub const tag_item_generics: uint = 0x95;
pub const tag_method_ty_generics: uint = 0x96;
pub const tag_item_generics: usize = 0x95;
pub const tag_method_ty_generics: usize = 0x96;
pub const tag_predicate: uint = 0x97;
pub const tag_predicate_space: uint = 0x98;
pub const tag_predicate_data: uint = 0x99;
pub const tag_predicate: usize = 0x97;
pub const tag_predicate_space: usize = 0x98;
pub const tag_predicate_data: usize = 0x99;
pub const tag_unsafety: uint = 0x9a;
pub const tag_unsafety: usize = 0x9a;
pub const tag_associated_type_names: uint = 0x9b;
pub const tag_associated_type_name: uint = 0x9c;
pub const tag_associated_type_names: usize = 0x9b;
pub const tag_associated_type_name: usize = 0x9c;
pub const tag_polarity: uint = 0x9d;
pub const tag_polarity: usize = 0x9d;
pub const tag_macro_defs: uint = 0x10e; // top-level only
pub const tag_macro_def: uint = 0x9e;
pub const tag_macro_def_body: uint = 0x9f;
pub const tag_macro_defs: usize = 0x10e; // top-level only
pub const tag_macro_def: usize = 0x9e;
pub const tag_macro_def_body: usize = 0x9f;
pub const tag_paren_sugar: uint = 0xa0;
pub const tag_paren_sugar: usize = 0xa0;
pub const tag_codemap: uint = 0xa1;
pub const tag_codemap_filemap: uint = 0xa2;
pub const tag_codemap: usize = 0xa1;
pub const tag_codemap_filemap: usize = 0xa2;
pub const tag_item_super_predicates: uint = 0xa3;
pub const tag_item_super_predicates: usize = 0xa3;
pub const tag_defaulted_trait: uint = 0xa4;
pub const tag_defaulted_trait: usize = 0xa4;
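As an aside, a small sketch of the tag layout noted in the comment at the top of this list (the `write_tag_bytes` helper is hypothetical): tags below 0xf0 occupy a single byte, while the 0x100..0xfff tags are written as two bytes whose first byte is 0xf0 ORed with the tag's high byte.

```rust
fn write_tag_bytes(tag: usize) -> Vec<u8> {
    if tag < 0xf0 {
        vec![tag as u8]
    } else {
        assert!((0x100..0x1000).contains(&tag), "tag out of range");
        vec![0xf0 | (tag >> 8) as u8, tag as u8]
    }
}

fn main() {
    assert_eq!(write_tag_bytes(0x23), vec![0x23]);        // tag_items_data_item
    assert_eq!(write_tag_bytes(0x123), vec![0xf1, 0x23]); // two-byte form
    assert_eq!(write_tag_bytes(0x100), vec![0xf1, 0x00]); // tag_items
}
```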

View File

@ -46,7 +46,7 @@ pub fn each_lang_item<F>(cstore: &cstore::CStore,
cnum: ast::CrateNum,
f: F)
-> bool where
F: FnMut(ast::NodeId, uint) -> bool,
F: FnMut(ast::NodeId, usize) -> bool,
{
let crate_data = cstore.get_crate_data(cnum);
decoder::each_lang_item(&*crate_data, f)

View File

@ -252,7 +252,7 @@ impl MetadataBlob {
let len = (((slice[0] as u32) << 24) |
((slice[1] as u32) << 16) |
((slice[2] as u32) << 8) |
((slice[3] as u32) << 0)) as uint;
((slice[3] as u32) << 0)) as usize;
if len + 4 <= slice.len() {
&slice[4.. len + 4]
} else {

View File

@ -71,15 +71,15 @@ fn lookup_hash<'a, F>(d: rbml::Doc<'a>, mut eq_fn: F, hash: u64) -> Option<rbml:
{
let index = reader::get_doc(d, tag_index);
let table = reader::get_doc(index, tag_index_table);
let hash_pos = table.start + (hash % 256 * 4) as uint;
let pos = u32_from_be_bytes(&d.data[hash_pos..]) as uint;
let hash_pos = table.start + (hash % 256 * 4) as usize;
let pos = u32_from_be_bytes(&d.data[hash_pos..]) as usize;
let tagged_doc = reader::doc_at(d.data, pos).unwrap();
let belt = tag_index_buckets_bucket_elt;
let mut ret = None;
reader::tagged_docs(tagged_doc.doc, belt, |elt| {
let pos = u32_from_be_bytes(&elt.data[elt.start..]) as uint;
let pos = u32_from_be_bytes(&elt.data[elt.start..]) as usize;
if eq_fn(&elt.data[elt.start + 4 .. elt.end]) {
ret = Some(reader::doc_at(d.data, pos).unwrap().doc);
false
@ -274,7 +274,7 @@ fn item_path(item_doc: rbml::Doc) -> Vec<ast_map::PathElem> {
let path_doc = reader::get_doc(item_doc, tag_path);
let len_doc = reader::get_doc(path_doc, tag_path_len);
let len = reader::doc_as_u32(len_doc) as uint;
let len = reader::doc_as_u32(len_doc) as usize;
let mut result = Vec::with_capacity(len);
reader::docs(path_doc, |tag, elt_doc| {
@ -513,13 +513,13 @@ pub enum DefLike {
/// Iterates over the language items in the given crate.
pub fn each_lang_item<F>(cdata: Cmd, mut f: F) -> bool where
F: FnMut(ast::NodeId, uint) -> bool,
F: FnMut(ast::NodeId, usize) -> bool,
{
let root = rbml::Doc::new(cdata.data());
let lang_items = reader::get_doc(root, tag_lang_items);
reader::tagged_docs(lang_items, tag_lang_items_item, |item_doc| {
let id_doc = reader::get_doc(item_doc, tag_lang_items_item_id);
let id = reader::doc_as_u32(id_doc) as uint;
let id = reader::doc_as_u32(id_doc) as usize;
let node_id_doc = reader::get_doc(item_doc,
tag_lang_items_item_node_id);
let node_id = reader::doc_as_u32(node_id_doc) as ast::NodeId;
@ -1194,7 +1194,7 @@ pub fn get_crate_deps(data: &[u8]) -> Vec<CrateDep> {
let cratedoc = rbml::Doc::new(data);
let depsdoc = reader::get_doc(cratedoc, tag_crate_deps);
let mut crate_num = 1;
fn docstr(doc: rbml::Doc, tag_: uint) -> String {
fn docstr(doc: rbml::Doc, tag_: usize) -> String {
let d = reader::get_doc(doc, tag_);
d.as_str_slice().to_string()
}
@ -1454,7 +1454,7 @@ pub fn is_typedef(cdata: Cmd, id: ast::NodeId) -> bool {
fn doc_generics<'tcx>(base_doc: rbml::Doc,
tcx: &ty::ctxt<'tcx>,
cdata: Cmd,
tag: uint)
tag: usize)
-> ty::Generics<'tcx>
{
let doc = reader::get_doc(base_doc, tag);
@ -1479,7 +1479,7 @@ fn doc_generics<'tcx>(base_doc: rbml::Doc,
let def_id = translate_def_id(cdata, def_id);
let doc = reader::get_doc(rp_doc, tag_region_param_def_space);
let space = subst::ParamSpace::from_uint(reader::doc_as_u64(doc) as uint);
let space = subst::ParamSpace::from_uint(reader::doc_as_u64(doc) as usize);
let doc = reader::get_doc(rp_doc, tag_region_param_def_index);
let index = reader::doc_as_u64(doc) as u32;
@ -1508,7 +1508,7 @@ fn doc_generics<'tcx>(base_doc: rbml::Doc,
fn doc_predicates<'tcx>(base_doc: rbml::Doc,
tcx: &ty::ctxt<'tcx>,
cdata: Cmd,
tag: uint)
tag: usize)
-> ty::GenericPredicates<'tcx>
{
let doc = reader::get_doc(base_doc, tag);
@ -1516,7 +1516,7 @@ fn doc_predicates<'tcx>(base_doc: rbml::Doc,
let mut predicates = subst::VecPerParamSpace::empty();
reader::tagged_docs(doc, tag_predicate, |predicate_doc| {
let space_doc = reader::get_doc(predicate_doc, tag_predicate_space);
let space = subst::ParamSpace::from_uint(reader::doc_as_u8(space_doc) as uint);
let space = subst::ParamSpace::from_uint(reader::doc_as_u8(space_doc) as usize);
let data_doc = reader::get_doc(predicate_doc, tag_predicate_data);
let data = parse_predicate_data(data_doc.data, data_doc.start, cdata.cnum, tcx,

View File

@ -105,7 +105,7 @@ struct entry<T> {
fn encode_trait_ref<'a, 'tcx>(rbml_w: &mut Encoder,
ecx: &EncodeContext<'a, 'tcx>,
trait_ref: &ty::TraitRef<'tcx>,
tag: uint) {
tag: usize) {
let ty_str_ctxt = &tyencode::ctxt {
diag: ecx.diag,
ds: def_to_string,
@ -703,7 +703,7 @@ fn encode_generics<'a, 'tcx>(rbml_w: &mut Encoder,
ecx: &EncodeContext<'a, 'tcx>,
generics: &ty::Generics<'tcx>,
predicates: &ty::GenericPredicates<'tcx>,
tag: uint)
tag: usize)
{
rbml_w.start_tag(tag);
@ -777,7 +777,7 @@ fn encode_predicates_in_current_doc<'a,'tcx>(rbml_w: &mut Encoder,
fn encode_predicates<'a,'tcx>(rbml_w: &mut Encoder,
ecx: &EncodeContext<'a,'tcx>,
predicates: &ty::GenericPredicates<'tcx>,
tag: uint)
tag: usize)
{
rbml_w.start_tag(tag);
encode_predicates_in_current_doc(rbml_w, ecx, predicates);
@ -1538,7 +1538,7 @@ fn encode_index<T, F>(rbml_w: &mut Encoder, index: Vec<entry<T>>, mut write_fn:
for elt in index {
let mut s = SipHasher::new();
elt.val.hash(&mut s);
let h = s.finish() as uint;
let h = s.finish() as usize;
(&mut buckets[h % 256]).push(elt);
}
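A self-contained sketch of the 256-bucket index built above (`DefaultHasher` stands in for `SipHasher`, and `bucket_of` is an illustrative helper, not the encoder's code): hash each entry and use the hash, as a `usize`, modulo 256 to pick a bucket.

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

fn bucket_of<T: Hash>(val: &T) -> usize {
    let mut s = DefaultHasher::new();
    val.hash(&mut s);
    s.finish() as usize % 256
}

fn main() {
    let mut buckets: Vec<Vec<&str>> = vec![Vec::new(); 256];
    for name in ["foo", "bar", "baz"] {
        buckets[bucket_of(&name)].push(name);
    }
    // Every entry lands in exactly one bucket.
    assert_eq!(buckets.iter().map(|b| b.len()).sum::<usize>(), 3);
}
```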
@ -1944,7 +1944,7 @@ pub fn encode_metadata(parms: EncodeParams, krate: &ast::Crate) -> Vec<u8> {
// RBML compacts the encoded bytes whenever appropriate,
// so there is some garbage left after the end of the data.
let metalen = wr.seek(SeekFrom::Current(0)).unwrap() as uint;
let metalen = wr.seek(SeekFrom::Current(0)).unwrap() as usize;
let mut v = wr.into_inner();
v.truncate(metalen);
assert_eq!(v.len(), metalen);

View File

@ -745,7 +745,7 @@ fn get_metadata_section_imp(is_osx: bool, filename: &Path) -> Result<MetadataBlo
unsafe {
let buf = common::path2cstr(filename);
let mb = llvm::LLVMRustCreateMemoryBufferWithContentsOfFile(buf.as_ptr());
if mb as int == 0 {
if mb as isize == 0 {
return Err(format!("error reading library: '{}'",
filename.display()))
}
@ -761,12 +761,12 @@ fn get_metadata_section_imp(is_osx: bool, filename: &Path) -> Result<MetadataBlo
let mut name_buf = ptr::null();
let name_len = llvm::LLVMRustGetSectionName(si.llsi, &mut name_buf);
let name = slice::from_raw_parts(name_buf as *const u8,
name_len as uint).to_vec();
name_len as usize).to_vec();
let name = String::from_utf8(name).unwrap();
debug!("get_metadata_section: name {}", name);
if read_meta_section_name(is_osx) == name {
let cbuf = llvm::LLVMGetSectionContents(si.llsi);
let csz = llvm::LLVMGetSectionSize(si.llsi) as uint;
let csz = llvm::LLVMGetSectionSize(si.llsi) as usize;
let cvbuf: *const u8 = cbuf as *const u8;
let vlen = encoder::metadata_encoding_version.len();
debug!("checking {} bytes of metadata-version stamp",
@ -779,7 +779,7 @@ fn get_metadata_section_imp(is_osx: bool, filename: &Path) -> Result<MetadataBlo
filename.display())));
}
let cvbuf1 = cvbuf.offset(vlen as int);
let cvbuf1 = cvbuf.offset(vlen as isize);
debug!("inflating {} bytes of compressed metadata",
csz - vlen);
let bytes = slice::from_raw_parts(cvbuf1, csz - vlen);

View File

@ -66,7 +66,7 @@ pub enum DefIdSource {
pub struct PState<'a, 'tcx: 'a> {
data: &'a [u8],
krate: ast::CrateNum,
pos: uint,
pos: usize,
tcx: &'a ty::ctxt<'tcx>
}
@ -119,7 +119,7 @@ fn parse_name_<F>(st: &mut PState, is_last: F) -> ast::Name where
}
pub fn parse_state_from_data<'a, 'tcx>(data: &'a [u8], crate_num: ast::CrateNum,
pos: uint, tcx: &'a ty::ctxt<'tcx>)
pos: usize, tcx: &'a ty::ctxt<'tcx>)
-> PState<'a, 'tcx> {
PState {
data: data,
@ -129,7 +129,7 @@ pub fn parse_state_from_data<'a, 'tcx>(data: &'a [u8], crate_num: ast::CrateNum,
}
}
fn data_log_string(data: &[u8], pos: uint) -> String {
fn data_log_string(data: &[u8], pos: usize) -> String {
let mut buf = String::new();
buf.push_str("<<");
for i in pos..data.len() {
@ -146,7 +146,7 @@ fn data_log_string(data: &[u8], pos: uint) -> String {
pub fn parse_ty_closure_data<'tcx, F>(data: &[u8],
crate_num: ast::CrateNum,
pos: uint,
pos: usize,
tcx: &ty::ctxt<'tcx>,
conv: F)
-> ty::ClosureTy<'tcx> where
@ -156,7 +156,7 @@ pub fn parse_ty_closure_data<'tcx, F>(data: &[u8],
parse_closure_ty(&mut st, conv)
}
pub fn parse_ty_data<'tcx, F>(data: &[u8], crate_num: ast::CrateNum, pos: uint,
pub fn parse_ty_data<'tcx, F>(data: &[u8], crate_num: ast::CrateNum, pos: usize,
tcx: &ty::ctxt<'tcx>, conv: F) -> Ty<'tcx> where
F: FnMut(DefIdSource, ast::DefId) -> ast::DefId,
{
@ -165,7 +165,7 @@ pub fn parse_ty_data<'tcx, F>(data: &[u8], crate_num: ast::CrateNum, pos: uint,
parse_ty(&mut st, conv)
}
pub fn parse_region_data<F>(data: &[u8], crate_num: ast::CrateNum, pos: uint, tcx: &ty::ctxt,
pub fn parse_region_data<F>(data: &[u8], crate_num: ast::CrateNum, pos: usize, tcx: &ty::ctxt,
conv: F) -> ty::Region where
F: FnMut(DefIdSource, ast::DefId) -> ast::DefId,
{
@ -174,7 +174,7 @@ pub fn parse_region_data<F>(data: &[u8], crate_num: ast::CrateNum, pos: uint, tc
parse_region(&mut st, conv)
}
pub fn parse_bare_fn_ty_data<'tcx, F>(data: &[u8], crate_num: ast::CrateNum, pos: uint,
pub fn parse_bare_fn_ty_data<'tcx, F>(data: &[u8], crate_num: ast::CrateNum, pos: usize,
tcx: &ty::ctxt<'tcx>, conv: F)
-> ty::BareFnTy<'tcx> where
F: FnMut(DefIdSource, ast::DefId) -> ast::DefId,
@ -184,7 +184,7 @@ pub fn parse_bare_fn_ty_data<'tcx, F>(data: &[u8], crate_num: ast::CrateNum, pos
parse_bare_fn_ty(&mut st, conv)
}
pub fn parse_trait_ref_data<'tcx, F>(data: &[u8], crate_num: ast::CrateNum, pos: uint,
pub fn parse_trait_ref_data<'tcx, F>(data: &[u8], crate_num: ast::CrateNum, pos: usize,
tcx: &ty::ctxt<'tcx>, conv: F)
-> Rc<ty::TraitRef<'tcx>> where
F: FnMut(DefIdSource, ast::DefId) -> ast::DefId,
@ -194,7 +194,7 @@ pub fn parse_trait_ref_data<'tcx, F>(data: &[u8], crate_num: ast::CrateNum, pos:
parse_trait_ref(&mut st, conv)
}
pub fn parse_substs_data<'tcx, F>(data: &[u8], crate_num: ast::CrateNum, pos: uint,
pub fn parse_substs_data<'tcx, F>(data: &[u8], crate_num: ast::CrateNum, pos: usize,
tcx: &ty::ctxt<'tcx>, conv: F) -> subst::Substs<'tcx> where
F: FnMut(DefIdSource, ast::DefId) -> ast::DefId,
{
@ -204,7 +204,7 @@ pub fn parse_substs_data<'tcx, F>(data: &[u8], crate_num: ast::CrateNum, pos: ui
}
pub fn parse_bounds_data<'tcx, F>(data: &[u8], crate_num: ast::CrateNum,
pos: uint, tcx: &ty::ctxt<'tcx>, conv: F)
pos: usize, tcx: &ty::ctxt<'tcx>, conv: F)
-> ty::ParamBounds<'tcx> where
F: FnMut(DefIdSource, ast::DefId) -> ast::DefId,
{
@ -213,7 +213,7 @@ pub fn parse_bounds_data<'tcx, F>(data: &[u8], crate_num: ast::CrateNum,
}
pub fn parse_existential_bounds_data<'tcx, F>(data: &[u8], crate_num: ast::CrateNum,
pos: uint, tcx: &ty::ctxt<'tcx>, conv: F)
pos: usize, tcx: &ty::ctxt<'tcx>, conv: F)
-> ty::ExistentialBounds<'tcx> where
F: FnMut(DefIdSource, ast::DefId) -> ast::DefId,
{
@ -222,7 +222,7 @@ pub fn parse_existential_bounds_data<'tcx, F>(data: &[u8], crate_num: ast::Crate
}
pub fn parse_builtin_bounds_data<F>(data: &[u8], crate_num: ast::CrateNum,
pos: uint, tcx: &ty::ctxt, conv: F)
pos: usize, tcx: &ty::ctxt, conv: F)
-> ty::BuiltinBounds where
F: FnMut(DefIdSource, ast::DefId) -> ast::DefId,
{
@ -230,7 +230,7 @@ pub fn parse_builtin_bounds_data<F>(data: &[u8], crate_num: ast::CrateNum,
parse_builtin_bounds(&mut st, conv)
}
fn parse_size(st: &mut PState) -> Option<uint> {
fn parse_size(st: &mut PState) -> Option<usize> {
assert_eq!(next(st), '/');
if peek(st) == '|' {
@ -447,8 +447,8 @@ fn parse_ty_<'a, 'tcx, F>(st: &mut PState<'a, 'tcx>, conv: &mut F) -> Ty<'tcx> w
let tcx = st.tcx;
match next(st) {
'b' => return tcx.types.bool,
'i' => { /* eat the s of is */ next(st); return tcx.types.int },
'u' => { /* eat the s of us */ next(st); return tcx.types.uint },
'i' => { /* eat the s of is */ next(st); return tcx.types.isize },
'u' => { /* eat the s of us */ next(st); return tcx.types.usize },
'M' => {
match next(st) {
'b' => return tcx.types.u8,
@ -592,21 +592,21 @@ fn parse_def_<F>(st: &mut PState, source: DefIdSource, conv: &mut F) -> ast::Def
return (*conv)(source, scan(st, |c| { c == '|' }, parse_def_id));
}
fn parse_uint(st: &mut PState) -> uint {
fn parse_uint(st: &mut PState) -> usize {
let mut n = 0;
loop {
let cur = peek(st);
if cur < '0' || cur > '9' { return n; }
st.pos = st.pos + 1;
n *= 10;
n += (cur as uint) - ('0' as uint);
n += (cur as usize) - ('0' as usize);
};
}
fn parse_u32(st: &mut PState) -> u32 {
let n = parse_uint(st);
let m = n as u32;
assert_eq!(m as uint, n);
assert_eq!(m as usize, n);
m
}
@ -614,7 +614,7 @@ fn parse_param_space(st: &mut PState) -> subst::ParamSpace {
subst::ParamSpace::from_uint(parse_uint(st))
}
fn parse_hex(st: &mut PState) -> uint {
fn parse_hex(st: &mut PState) -> usize {
let mut n = 0;
loop {
let cur = peek(st);
@ -622,8 +622,8 @@ fn parse_hex(st: &mut PState) -> uint {
st.pos = st.pos + 1;
n *= 16;
if '0' <= cur && cur <= '9' {
n += (cur as uint) - ('0' as uint);
} else { n += 10 + (cur as uint) - ('a' as uint); }
n += (cur as usize) - ('0' as usize);
} else { n += 10 + (cur as usize) - ('a' as usize); }
};
}
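
The `parse_uint` and `parse_hex` readers above accumulate a number one ASCII digit at a time while advancing `st.pos`. A self-contained sketch of the same accumulation loop over a plain `&str` cursor:

    // Read a decimal number from `s` starting at `*pos`, advancing the
    // cursor past the digits, in the style of parse_uint above.
    fn read_decimal(s: &str, pos: &mut usize) -> usize {
        let bytes = s.as_bytes();
        let mut n = 0usize;
        while *pos < bytes.len() {
            let c = bytes[*pos] as char;
            if c < '0' || c > '9' {
                break;
            }
            *pos += 1;
            n = n * 10 + (c as usize - '0' as usize);
        }
        n
    }

    fn main() {
        let mut pos = 0;
        assert_eq!(read_decimal("42|rest", &mut pos), 42);
        assert_eq!(pos, 2); // cursor stops at the '|' terminator
    }
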
@ -725,14 +725,14 @@ pub fn parse_def_id(buf: &[u8]) -> ast::DefId {
let def_part = &buf[colon_idx + 1..len];
let crate_num = match str::from_utf8(crate_part).ok().and_then(|s| {
s.parse::<uint>().ok()
s.parse::<usize>().ok()
}) {
Some(cn) => cn as ast::CrateNum,
None => panic!("internal error: parse_def_id: crate number expected, found {:?}",
crate_part)
};
let def_num = match str::from_utf8(def_part).ok().and_then(|s| {
s.parse::<uint>().ok()
s.parse::<usize>().ok()
}) {
Some(dn) => dn as ast::NodeId,
None => panic!("internal error: parse_def_id: id expected, found {:?}",
@ -742,7 +742,7 @@ pub fn parse_def_id(buf: &[u8]) -> ast::DefId {
}
pub fn parse_predicate_data<'tcx, F>(data: &[u8],
start: uint,
start: usize,
crate_num: ast::CrateNum,
tcx: &ty::ctxt<'tcx>,
conv: F)
@ -794,7 +794,7 @@ fn parse_projection_predicate_<'a,'tcx, F>(
}
}
pub fn parse_type_param_def_data<'tcx, F>(data: &[u8], start: uint,
pub fn parse_type_param_def_data<'tcx, F>(data: &[u8], start: usize,
crate_num: ast::CrateNum, tcx: &ty::ctxt<'tcx>,
conv: F) -> ty::TypeParameterDef<'tcx> where
F: FnMut(DefIdSource, ast::DefId) -> ast::DefId,

View File

@ -19,10 +19,10 @@ use middle::ty::{self, Ty};
use syntax::ast;
use util::ppaux::Repr;
pub const NO_REGIONS: uint = 1;
pub const NO_TPS: uint = 2;
pub const NO_REGIONS: usize = 1;
pub const NO_TPS: usize = 2;
pub fn check_path_args(tcx: &ty::ctxt, segments: &[ast::PathSegment], flags: uint) {
pub fn check_path_args(tcx: &ty::ctxt, segments: &[ast::PathSegment], flags: usize) {
for segment in segments {
if (flags & NO_TPS) != 0 {
for typ in segment.parameters.types() {

View File

@ -93,7 +93,7 @@ pub fn encode_inlined_item(ecx: &e::EncodeContext,
let ii = simplify_ast(ii);
let id_range = ast_util::compute_id_range_for_inlined_item(&ii);
rbml_w.start_tag(c::tag_ast as uint);
rbml_w.start_tag(c::tag_ast as usize);
id_range.encode(rbml_w);
encode_ast(rbml_w, &ii);
encode_side_tables_for_ii(ecx, rbml_w, &ii);
@ -360,7 +360,7 @@ impl<D:serialize::Decoder> def_id_decoder_helpers for D
// but eventually we should add entries to the local codemap as required.
fn encode_ast(rbml_w: &mut Encoder, item: &ast::InlinedItem) {
rbml_w.start_tag(c::tag_tree as uint);
rbml_w.start_tag(c::tag_tree as usize);
item.encode(rbml_w);
rbml_w.end_tag();
}
@ -437,7 +437,7 @@ fn simplify_ast(ii: e::InlinedItemRef) -> ast::InlinedItem {
}
fn decode_ast(par_doc: rbml::Doc) -> ast::InlinedItem {
let chi_doc = par_doc.get(c::tag_tree as uint);
let chi_doc = par_doc.get(c::tag_tree as usize);
let mut d = reader::Decoder::new(chi_doc);
Decodable::decode(&mut d).unwrap()
}
@ -1150,7 +1150,7 @@ impl<'a> write_tag_and_id for Encoder<'a> {
f: F) where
F: FnOnce(&mut Encoder<'a>),
{
self.start_tag(tag_id as uint);
self.start_tag(tag_id as usize);
f(self);
self.end_tag();
}
@ -1175,7 +1175,7 @@ impl<'a, 'b, 'c, 'tcx> ast_util::IdVisitingOperation for
fn encode_side_tables_for_ii(ecx: &e::EncodeContext,
rbml_w: &mut Encoder,
ii: &ast::InlinedItem) {
rbml_w.start_tag(c::tag_table as uint);
rbml_w.start_tag(c::tag_table as usize);
ast_util::visit_ids_for_inlined_item(ii, &mut SideTableEncodingIdVisitor {
ecx: ecx,
rbml_w: rbml_w
@ -1323,14 +1323,14 @@ fn encode_side_tables_for_id(ecx: &e::EncodeContext,
}
trait doc_decoder_helpers {
fn as_int(&self) -> int;
fn as_int(&self) -> isize;
fn opt_child(&self, tag: c::astencode_tag) -> Option<Self>;
}
impl<'a> doc_decoder_helpers for rbml::Doc<'a> {
fn as_int(&self) -> int { reader::doc_as_u64(*self) as int }
fn as_int(&self) -> isize { reader::doc_as_u64(*self) as isize }
fn opt_child(&self, tag: c::astencode_tag) -> Option<rbml::Doc<'a>> {
reader::maybe_get_doc(*self, tag as uint)
reader::maybe_get_doc(*self, tag as usize)
}
}
@ -1746,7 +1746,7 @@ impl<'a, 'tcx> rbml_decoder_decoder_helpers<'tcx> for reader::Decoder<'a> {
this.read_enum_variant(variants, |this, i| {
Ok(match i {
0 => {
let len: uint =
let len: usize =
this.read_enum_variant_arg(0, |this| Decodable::decode(this)).unwrap();
ty::UnsizeLength(len)
@ -1755,7 +1755,7 @@ impl<'a, 'tcx> rbml_decoder_decoder_helpers<'tcx> for reader::Decoder<'a> {
let uk: ty::UnsizeKind =
this.read_enum_variant_arg(0,
|this| Ok(this.read_unsize_kind(dcx))).unwrap();
let idx: uint =
let idx: usize =
this.read_enum_variant_arg(1, |this| Decodable::decode(this)).unwrap();
ty::UnsizeStruct(box uk, idx)
@ -1851,7 +1851,7 @@ impl<'a, 'tcx> rbml_decoder_decoder_helpers<'tcx> for reader::Decoder<'a> {
fn decode_side_tables(dcx: &DecodeContext,
ast_doc: rbml::Doc) {
let tbl_doc = ast_doc.get(c::tag_table as uint);
let tbl_doc = ast_doc.get(c::tag_table as usize);
reader::docs(tbl_doc, |tag, entry_doc| {
let mut entry_dsr = reader::Decoder::new(entry_doc);
let id0: ast::NodeId = Decodable::decode(&mut entry_dsr).unwrap();
@ -1969,14 +1969,14 @@ fn decode_side_tables(dcx: &DecodeContext,
#[cfg(test)]
fn encode_item_ast(rbml_w: &mut Encoder, item: &ast::Item) {
rbml_w.start_tag(c::tag_tree as uint);
rbml_w.start_tag(c::tag_tree as usize);
(*item).encode(rbml_w);
rbml_w.end_tag();
}
#[cfg(test)]
fn decode_item_ast(par_doc: rbml::Doc) -> ast::Item {
let chi_doc = par_doc.get(c::tag_tree as uint);
let chi_doc = par_doc.get(c::tag_tree as usize);
let mut d = reader::Decoder::new(chi_doc);
Decodable::decode(&mut d).unwrap()
}
@ -2035,7 +2035,7 @@ fn test_basic() {
fn test_smalltalk() {
let cx = mk_ctxt();
roundtrip(quote_item!(&cx,
fn foo() -> int { 3 + 4 } // first smalltalk program ever executed.
fn foo() -> isize { 3 + 4 } // first smalltalk program ever executed.
));
}
*/
@ -2044,7 +2044,7 @@ fn test_smalltalk() {
fn test_more() {
let cx = mk_ctxt();
roundtrip(quote_item!(&cx,
fn foo(x: uint, y: uint) -> uint {
fn foo(x: usize, y: usize) -> usize {
let z = x + y;
return z;
}
@ -2055,15 +2055,15 @@ fn test_more() {
fn test_simplification() {
let cx = mk_ctxt();
let item = quote_item!(&cx,
fn new_int_alist<B>() -> alist<int, B> {
fn eq_int(a: int, b: int) -> bool { a == b }
fn new_int_alist<B>() -> alist<isize, B> {
fn eq_int(a: isize, b: isize) -> bool { a == b }
return alist {eq_fn: eq_int, data: Vec::new()};
}
).unwrap();
let item_in = e::IIItemRef(&*item);
let item_out = simplify_ast(item_in);
let item_exp = ast::IIItem(quote_item!(&cx,
fn new_int_alist<B>() -> alist<int, B> {
fn new_int_alist<B>() -> alist<isize, B> {
return alist {eq_fn: eq_int, data: Vec::new()};
}
).unwrap());

View File

@ -72,7 +72,7 @@ impl<'a> fmt::Debug for Matrix<'a> {
let column_count = m.iter().map(|row| row.len()).max().unwrap_or(0);
assert!(m.iter().all(|row| row.len() == column_count));
let column_widths: Vec<uint> = (0..column_count).map(|col| {
let column_widths: Vec<usize> = (0..column_count).map(|col| {
pretty_printed_matrix.iter().map(|row| row[col].len()).max().unwrap_or(0)
}).collect();
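
The `column_widths` computation above sizes each column to its widest pretty-printed cell. A standalone sketch of the same idea (it guards against ragged rows with `get`, where the original instead rules them out with an `assert!`):

    // Width of each column = longest cell in that column.
    fn column_widths(rows: &[Vec<String>]) -> Vec<usize> {
        let column_count = rows.iter().map(|row| row.len()).max().unwrap_or(0);
        (0..column_count)
            .map(|col| {
                rows.iter()
                    .filter_map(|row| row.get(col).map(|cell| cell.len()))
                    .max()
                    .unwrap_or(0)
            })
            .collect()
    }

    fn main() {
        let rows = vec![
            vec!["+".to_string(), "_".to_string()],
            vec!["10".to_string(), "true".to_string()],
        ];
        assert_eq!(column_widths(&rows), vec![2, 4]);
    }
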
@ -116,9 +116,9 @@ pub enum Constructor {
/// Ranges of literal values (2..5).
ConstantRange(const_val, const_val),
/// Array patterns of length n.
Slice(uint),
Slice(usize),
/// Array patterns with a subslice.
SliceWithSubslice(uint, uint)
SliceWithSubslice(usize, usize)
}
#[derive(Clone, PartialEq)]
@ -498,7 +498,7 @@ impl<'a, 'tcx> Folder for StaticInliner<'a, 'tcx> {
/// left_ty: tuple of 3 elements
/// pats: [10, 20, _] => (10, 20, _)
///
/// left_ty: struct X { a: (bool, &'static str), b: uint}
/// left_ty: struct X { a: (bool, &'static str), b: usize}
/// pats: [(false, "foo"), 42] => X { a: (false, "foo"), b: 42 }
fn construct_witness(cx: &MatchCheckCtxt, ctor: &Constructor,
pats: Vec<&Pat>, left_ty: Ty) -> P<Pat> {
@ -580,7 +580,7 @@ fn construct_witness(cx: &MatchCheckCtxt, ctor: &Constructor,
}
fn missing_constructor(cx: &MatchCheckCtxt, &Matrix(ref rows): &Matrix,
left_ty: Ty, max_slice_length: uint) -> Option<Constructor> {
left_ty: Ty, max_slice_length: usize) -> Option<Constructor> {
let used_constructors: Vec<Constructor> = rows.iter()
.flat_map(|row| pat_constructors(cx, row[0], left_ty, max_slice_length).into_iter())
.collect();
@ -594,7 +594,7 @@ fn missing_constructor(cx: &MatchCheckCtxt, &Matrix(ref rows): &Matrix,
/// but is instead bounded by the maximum fixed length of slice patterns in
/// the column of patterns being analyzed.
fn all_constructors(cx: &MatchCheckCtxt, left_ty: Ty,
max_slice_length: uint) -> Vec<Constructor> {
max_slice_length: usize) -> Vec<Constructor> {
match left_ty.sty {
ty::ty_bool =>
[true, false].iter().map(|b| ConstantValue(const_bool(*b))).collect(),
@ -741,7 +741,7 @@ fn is_useful_specialized(cx: &MatchCheckCtxt, &Matrix(ref m): &Matrix,
/// On the other hand, a wild pattern and an identifier pattern cannot be
/// specialized in any way.
fn pat_constructors(cx: &MatchCheckCtxt, p: &Pat,
left_ty: Ty, max_slice_length: uint) -> Vec<Constructor> {
left_ty: Ty, max_slice_length: usize) -> Vec<Constructor> {
let pat = raw_pat(p);
match pat.node {
ast::PatIdent(..) =>
@ -798,7 +798,7 @@ fn pat_constructors(cx: &MatchCheckCtxt, p: &Pat,
///
/// For instance, a tuple pattern (_, 42, Some([])) has the arity of 3.
/// A struct pattern's arity is the number of fields it contains, etc.
pub fn constructor_arity(cx: &MatchCheckCtxt, ctor: &Constructor, ty: Ty) -> uint {
pub fn constructor_arity(cx: &MatchCheckCtxt, ctor: &Constructor, ty: Ty) -> usize {
match ty.sty {
ty::ty_tup(ref fs) => fs.len(),
ty::ty_uniq(_) => 1,
@ -850,7 +850,7 @@ fn range_covered_by_constructor(ctor: &Constructor,
/// Structure patterns with a partial wild pattern (Foo { a: 42, .. }) have their missing
/// fields filled with wild patterns.
pub fn specialize<'a>(cx: &MatchCheckCtxt, r: &[&'a Pat],
constructor: &Constructor, col: uint, arity: uint) -> Option<Vec<&'a Pat>> {
constructor: &Constructor, col: usize, arity: usize) -> Option<Vec<&'a Pat>> {
let &Pat {
id: pat_id, ref node, span: pat_span
} = raw_pat(r[col]);

View File

@ -262,8 +262,8 @@ impl ConstEvalErr {
CannotCastTo(s) => format!("can't cast this type to {}", s).into_cow(),
InvalidOpForBools(_) => "can't do this op on bools".into_cow(),
InvalidOpForFloats(_) => "can't do this op on floats".into_cow(),
InvalidOpForIntUint(..) => "can't do this op on an int and uint".into_cow(),
InvalidOpForUintInt(..) => "can't do this op on a uint and int".into_cow(),
InvalidOpForIntUint(..) => "can't do this op on an isize and usize".into_cow(),
InvalidOpForUintInt(..) => "can't do this op on a usize and isize".into_cow(),
NegateOnString => "negate on string".into_cow(),
NegateOnBoolean => "negate on boolean".into_cow(),
NegateOnBinary => "negate on binary literal".into_cow(),
@ -369,7 +369,7 @@ pub fn eval_const_expr_partial<'tcx>(tcx: &ty::ctxt<'tcx>,
}
ast::ExprBinary(op, ref a, ref b) => {
let b_ty = match op.node {
ast::BiShl | ast::BiShr => Some(tcx.types.uint),
ast::BiShl | ast::BiShr => Some(tcx.types.usize),
_ => ety
};
match (try!(eval_const_expr_partial(tcx, &**a, ety)),
@ -434,8 +434,8 @@ pub fn eval_const_expr_partial<'tcx>(tcx: &ty::ctxt<'tcx>,
ast::BiAnd | ast::BiBitAnd => const_int(a & b),
ast::BiOr | ast::BiBitOr => const_int(a | b),
ast::BiBitXor => const_int(a ^ b),
ast::BiShl => const_int(a << b as uint),
ast::BiShr => const_int(a >> b as uint),
ast::BiShl => const_int(a << b as usize),
ast::BiShr => const_int(a >> b as usize),
ast::BiEq => fromb(a == b),
ast::BiLt => fromb(a < b),
ast::BiLe => fromb(a <= b),
@ -456,8 +456,8 @@ pub fn eval_const_expr_partial<'tcx>(tcx: &ty::ctxt<'tcx>,
ast::BiAnd | ast::BiBitAnd => const_uint(a & b),
ast::BiOr | ast::BiBitOr => const_uint(a | b),
ast::BiBitXor => const_uint(a ^ b),
ast::BiShl => const_uint(a << b as uint),
ast::BiShr => const_uint(a >> b as uint),
ast::BiShl => const_uint(a << b as usize),
ast::BiShr => const_uint(a >> b as usize),
ast::BiEq => fromb(a == b),
ast::BiLt => fromb(a < b),
ast::BiLe => fromb(a <= b),
@ -469,15 +469,15 @@ pub fn eval_const_expr_partial<'tcx>(tcx: &ty::ctxt<'tcx>,
// shifts can have any integral type as their rhs
(const_int(a), const_uint(b)) => {
match op.node {
ast::BiShl => const_int(a << b as uint),
ast::BiShr => const_int(a >> b as uint),
ast::BiShl => const_int(a << b as usize),
ast::BiShr => const_int(a >> b as usize),
_ => signal!(e, InvalidOpForIntUint(op.node)),
}
}
(const_uint(a), const_int(b)) => {
match op.node {
ast::BiShl => const_uint(a << b as uint),
ast::BiShr => const_uint(a >> b as uint),
ast::BiShl => const_uint(a << b as usize),
ast::BiShr => const_uint(a >> b as usize),
_ => signal!(e, InvalidOpForUintInt(op.node)),
}
}
@ -628,12 +628,12 @@ fn cast_const(val: const_val, ty: Ty) -> Result<const_val, ErrKind> {
}
define_casts!{
ty::ty_int(ast::TyIs) => (int, const_int, i64),
ty::ty_int(ast::TyIs) => (isize, const_int, i64),
ty::ty_int(ast::TyI8) => (i8, const_int, i64),
ty::ty_int(ast::TyI16) => (i16, const_int, i64),
ty::ty_int(ast::TyI32) => (i32, const_int, i64),
ty::ty_int(ast::TyI64) => (i64, const_int, i64),
ty::ty_uint(ast::TyUs) => (uint, const_uint, u64),
ty::ty_uint(ast::TyUs) => (usize, const_uint, u64),
ty::ty_uint(ast::TyU8) => (u8, const_uint, u64),
ty::ty_uint(ast::TyU16) => (u16, const_uint, u64),
ty::ty_uint(ast::TyU32) => (u32, const_uint, u64),

View File

@ -45,11 +45,11 @@ pub struct DataFlowContext<'a, 'tcx: 'a, O> {
oper: O,
/// number of bits to propagate per id
bits_per_id: uint,
bits_per_id: usize,
/// number of words we will use to store bits_per_id.
/// equal to bits_per_id/usize::BITS rounded up.
words_per_id: uint,
words_per_id: usize,
// mapping from node to cfg node index
// FIXME (#6298): Shouldn't this go with CFG?
@ -62,19 +62,19 @@ pub struct DataFlowContext<'a, 'tcx: 'a, O> {
// the full vector (see the method `compute_id_range()`).
/// bits generated as we exit the cfg node. Updated by `add_gen()`.
gens: Vec<uint>,
gens: Vec<usize>,
/// bits killed as we exit the cfg node. Updated by `add_kill()`.
kills: Vec<uint>,
kills: Vec<usize>,
/// bits that are valid on entry to the cfg node. Updated by
/// `propagate()`.
on_entry: Vec<uint>,
on_entry: Vec<usize>,
}
pub trait BitwiseOperator {
/// Joins two predecessor bits together, typically either `|` or `&`
fn join(&self, succ: uint, pred: uint) -> uint;
fn join(&self, succ: usize, pred: usize) -> usize;
}
/// Parameterization for the precise form of data flow that is used.
@ -204,7 +204,7 @@ impl<'a, 'tcx, O:DataFlowOperator> DataFlowContext<'a, 'tcx, O> {
cfg: &cfg::CFG,
oper: O,
id_range: IdRange,
bits_per_id: uint) -> DataFlowContext<'a, 'tcx, O> {
bits_per_id: usize) -> DataFlowContext<'a, 'tcx, O> {
let words_per_id = (bits_per_id + usize::BITS as usize - 1) / usize::BITS as usize;
let num_nodes = cfg.graph.all_nodes().len();
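
The `words_per_id` expression above is a ceiling division: enough machine words to hold `bits_per_id` bits. A small sketch of that rounding, plus the word/mask split that `set_bit` further down performs; the mask line is not shown in the hunk, so `1 << (bit % BITS)` is an assumption, and the asserts assume a 64-bit target:

    // Round a bit count up to whole words, and locate a bit within them.
    fn words_for(bits_per_id: usize) -> usize {
        (bits_per_id + usize::BITS as usize - 1) / usize::BITS as usize
    }

    fn word_and_mask(bit: usize) -> (usize, usize) {
        (bit / usize::BITS as usize, 1 << (bit % usize::BITS as usize))
    }

    fn main() {
        assert_eq!(words_for(1), 1);  // one bit still needs a whole word
        assert_eq!(words_for(64), 1);
        assert_eq!(words_for(65), 2); // rounded up, not truncated
        assert_eq!(word_and_mask(70), (1, 1 << 6));
    }
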
@ -235,7 +235,7 @@ impl<'a, 'tcx, O:DataFlowOperator> DataFlowContext<'a, 'tcx, O> {
}
}
pub fn add_gen(&mut self, id: ast::NodeId, bit: uint) {
pub fn add_gen(&mut self, id: ast::NodeId, bit: usize) {
//! Indicates that `id` generates `bit`
debug!("{} add_gen(id={}, bit={})",
self.analysis_name, id, bit);
@ -250,7 +250,7 @@ impl<'a, 'tcx, O:DataFlowOperator> DataFlowContext<'a, 'tcx, O> {
}
}
pub fn add_kill(&mut self, id: ast::NodeId, bit: uint) {
pub fn add_kill(&mut self, id: ast::NodeId, bit: usize) {
//! Indicates that `id` kills `bit`
debug!("{} add_kill(id={}, bit={})",
self.analysis_name, id, bit);
@ -265,7 +265,7 @@ impl<'a, 'tcx, O:DataFlowOperator> DataFlowContext<'a, 'tcx, O> {
}
}
fn apply_gen_kill(&self, cfgidx: CFGIndex, bits: &mut [uint]) {
fn apply_gen_kill(&self, cfgidx: CFGIndex, bits: &mut [usize]) {
//! Applies the gen and kill sets for `cfgidx` to `bits`
debug!("{} apply_gen_kill(cfgidx={:?}, bits={}) [before]",
self.analysis_name, cfgidx, mut_bits_to_string(bits));
@ -281,7 +281,7 @@ impl<'a, 'tcx, O:DataFlowOperator> DataFlowContext<'a, 'tcx, O> {
self.analysis_name, cfgidx, mut_bits_to_string(bits));
}
fn compute_id_range(&self, cfgidx: CFGIndex) -> (uint, uint) {
fn compute_id_range(&self, cfgidx: CFGIndex) -> (usize, usize) {
let n = cfgidx.node_id();
let start = n * self.words_per_id;
let end = start + self.words_per_id;
@ -296,7 +296,7 @@ impl<'a, 'tcx, O:DataFlowOperator> DataFlowContext<'a, 'tcx, O> {
pub fn each_bit_on_entry<F>(&self, id: ast::NodeId, mut f: F) -> bool where
F: FnMut(uint) -> bool,
F: FnMut(usize) -> bool,
{
//! Iterates through each bit that is set on entry to `id`.
//! Only useful after `propagate()` has been called.
@ -313,7 +313,7 @@ impl<'a, 'tcx, O:DataFlowOperator> DataFlowContext<'a, 'tcx, O> {
}
pub fn each_bit_for_node<F>(&self, e: EntryOrExit, cfgidx: CFGIndex, f: F) -> bool where
F: FnMut(uint) -> bool,
F: FnMut(usize) -> bool,
{
//! Iterates through each bit that is set on entry/exit to `cfgidx`.
//! Only useful after `propagate()` has been called.
@ -342,7 +342,7 @@ impl<'a, 'tcx, O:DataFlowOperator> DataFlowContext<'a, 'tcx, O> {
}
pub fn each_gen_bit<F>(&self, id: ast::NodeId, mut f: F) -> bool where
F: FnMut(uint) -> bool,
F: FnMut(usize) -> bool,
{
//! Iterates through each bit in the gen set for `id`.
if !self.has_bitset_for_nodeid(id) {
@ -368,8 +368,8 @@ impl<'a, 'tcx, O:DataFlowOperator> DataFlowContext<'a, 'tcx, O> {
return true;
}
fn each_bit<F>(&self, words: &[uint], mut f: F) -> bool where
F: FnMut(uint) -> bool,
fn each_bit<F>(&self, words: &[usize], mut f: F) -> bool where
F: FnMut(usize) -> bool,
{
//! Helper for iterating over the bits in a bit set.
//! Returns false on the first call to `f` that returns false;
@ -505,7 +505,7 @@ impl<'a, 'tcx, O:DataFlowOperator+Clone+'static> DataFlowContext<'a, 'tcx, O> {
impl<'a, 'b, 'tcx, O:DataFlowOperator> PropagationContext<'a, 'b, 'tcx, O> {
fn walk_cfg(&mut self,
cfg: &cfg::CFG,
in_out: &mut [uint]) {
in_out: &mut [usize]) {
debug!("DataFlowContext::walk_cfg(in_out={}) {}",
bits_to_string(in_out), self.dfcx.analysis_name);
assert!(self.dfcx.bits_per_id > 0);
@ -529,7 +529,7 @@ impl<'a, 'b, 'tcx, O:DataFlowOperator> PropagationContext<'a, 'b, 'tcx, O> {
});
}
fn reset(&mut self, bits: &mut [uint]) {
fn reset(&mut self, bits: &mut [usize]) {
let e = if self.dfcx.oper.initial_value() {usize::MAX} else {0};
for b in bits {
*b = e;
@ -537,7 +537,7 @@ impl<'a, 'b, 'tcx, O:DataFlowOperator> PropagationContext<'a, 'b, 'tcx, O> {
}
fn propagate_bits_into_graph_successors_of(&mut self,
pred_bits: &[uint],
pred_bits: &[usize],
cfg: &cfg::CFG,
cfgidx: CFGIndex) {
cfg.graph.each_outgoing_edge(cfgidx, |_e_idx, edge| {
@ -547,7 +547,7 @@ impl<'a, 'b, 'tcx, O:DataFlowOperator> PropagationContext<'a, 'b, 'tcx, O> {
}
fn propagate_bits_into_entry_set_for(&mut self,
pred_bits: &[uint],
pred_bits: &[usize],
edge: &cfg::CFGEdge) {
let source = edge.source();
let cfgidx = edge.target();
@ -570,11 +570,11 @@ impl<'a, 'b, 'tcx, O:DataFlowOperator> PropagationContext<'a, 'b, 'tcx, O> {
}
}
fn mut_bits_to_string(words: &mut [uint]) -> String {
fn mut_bits_to_string(words: &mut [usize]) -> String {
bits_to_string(words)
}
fn bits_to_string(words: &[uint]) -> String {
fn bits_to_string(words: &[usize]) -> String {
let mut result = String::new();
let mut sep = '[';
@ -594,8 +594,8 @@ fn bits_to_string(words: &[uint]) -> String {
}
#[inline]
fn bitwise<Op:BitwiseOperator>(out_vec: &mut [uint],
in_vec: &[uint],
fn bitwise<Op:BitwiseOperator>(out_vec: &mut [usize],
in_vec: &[usize],
op: &Op) -> bool {
assert_eq!(out_vec.len(), in_vec.len());
let mut changed = false;
@ -608,7 +608,7 @@ fn bitwise<Op:BitwiseOperator>(out_vec: &mut [uint],
changed
}
fn set_bit(words: &mut [uint], bit: uint) -> bool {
fn set_bit(words: &mut [usize], bit: usize) -> bool {
debug!("set_bit: words={} bit={}",
mut_bits_to_string(words), bit_str(bit));
let word = bit / usize::BITS as usize;
@ -621,7 +621,7 @@ fn set_bit(words: &mut [uint], bit: uint) -> bool {
oldv != newv
}
fn bit_str(bit: uint) -> String {
fn bit_str(bit: usize) -> String {
let byte = bit >> 8;
let lobits = 1 << (bit & 0xFF);
format!("[{}:{}-{:02x}]", bit, byte, lobits)
@ -629,9 +629,9 @@ fn bit_str(bit: uint) -> String {
struct Union;
impl BitwiseOperator for Union {
fn join(&self, a: uint, b: uint) -> uint { a | b }
fn join(&self, a: usize, b: usize) -> usize { a | b }
}
struct Subtract;
impl BitwiseOperator for Subtract {
fn join(&self, a: uint, b: uint) -> uint { a & !b }
fn join(&self, a: usize, b: usize) -> usize { a & !b }
}

View File

@ -145,7 +145,7 @@ impl<'a, 'tcx> MarkSymbolVisitor<'a, 'tcx> {
}
}
fn handle_tup_field_access(&mut self, lhs: &ast::Expr, idx: uint) {
fn handle_tup_field_access(&mut self, lhs: &ast::Expr, idx: usize) {
match ty::expr_ty_adjusted(self.tcx, lhs).sty {
ty::ty_struct(id, _) => {
let fields = ty::lookup_struct_fields(self.tcx, id);

View File

@ -172,7 +172,7 @@ fn calculate_type(sess: &session::Session,
assert!(src.rlib.is_some());
debug!("adding staticlib: {}", data.name);
add_library(sess, cnum, cstore::RequireStatic, &mut formats);
ret[cnum as uint - 1] = Some(cstore::RequireStatic);
ret[cnum as usize - 1] = Some(cstore::RequireStatic);
}
});

View File

@ -823,7 +823,7 @@ impl<'d,'t,'tcx,TYPER:mc::Typer<'tcx>> ExprUseVisitor<'d,'t,'tcx,TYPER> {
/// `deref()` is declared with `&self`, this is an autoref of `x`.
fn walk_autoderefs(&mut self,
expr: &ast::Expr,
autoderefs: uint) {
autoderefs: usize) {
debug!("walk_autoderefs expr={} autoderefs={}", expr.repr(self.tcx()), autoderefs);
for i in 0..autoderefs {
@ -855,7 +855,7 @@ impl<'d,'t,'tcx,TYPER:mc::Typer<'tcx>> ExprUseVisitor<'d,'t,'tcx,TYPER> {
fn walk_autoref(&mut self,
expr: &ast::Expr,
autoref: &ty::AutoRef,
n: uint) {
n: usize) {
debug!("walk_autoref expr={}", expr.repr(self.tcx()));
match *autoref {

View File

@ -25,11 +25,11 @@ pub enum SimplifiedType {
StrSimplifiedType,
VecSimplifiedType,
PtrSimplifiedType,
TupleSimplifiedType(uint),
TupleSimplifiedType(usize),
TraitSimplifiedType(ast::DefId),
StructSimplifiedType(ast::DefId),
ClosureSimplifiedType(ast::DefId),
FunctionSimplifiedType(uint),
FunctionSimplifiedType(usize),
ParameterSimplifiedType,
}

View File

@ -62,33 +62,33 @@ impl<E: Debug> Debug for Edge<E> {
}
#[derive(Clone, Copy, PartialEq, Debug)]
pub struct NodeIndex(pub uint);
pub struct NodeIndex(pub usize);
#[allow(non_upper_case_globals)]
pub const InvalidNodeIndex: NodeIndex = NodeIndex(usize::MAX);
#[derive(Copy, PartialEq, Debug)]
pub struct EdgeIndex(pub uint);
pub struct EdgeIndex(pub usize);
#[allow(non_upper_case_globals)]
pub const InvalidEdgeIndex: EdgeIndex = EdgeIndex(usize::MAX);
// Use a private field here to guarantee no more instances are created:
#[derive(Copy, Debug)]
pub struct Direction { repr: uint }
pub struct Direction { repr: usize }
#[allow(non_upper_case_globals)]
pub const Outgoing: Direction = Direction { repr: 0 };
#[allow(non_upper_case_globals)]
pub const Incoming: Direction = Direction { repr: 1 };
impl NodeIndex {
fn get(&self) -> uint { let NodeIndex(v) = *self; v }
fn get(&self) -> usize { let NodeIndex(v) = *self; v }
/// Returns unique id (unique with respect to the graph holding associated node).
pub fn node_id(&self) -> uint { self.get() }
pub fn node_id(&self) -> usize { self.get() }
}
impl EdgeIndex {
fn get(&self) -> uint { let EdgeIndex(v) = *self; v }
fn get(&self) -> usize { let EdgeIndex(v) = *self; v }
/// Returns unique id (unique with respect to the graph holding associated edge).
pub fn edge_id(&self) -> uint { self.get() }
pub fn edge_id(&self) -> usize { self.get() }
}
impl<N,E> Graph<N,E> {
@ -99,8 +99,8 @@ impl<N,E> Graph<N,E> {
}
}
pub fn with_capacity(num_nodes: uint,
num_edges: uint) -> Graph<N,E> {
pub fn with_capacity(num_nodes: usize,
num_edges: usize) -> Graph<N,E> {
Graph {
nodes: Vec::with_capacity(num_nodes),
edges: Vec::with_capacity(num_edges),
@ -275,7 +275,7 @@ impl<N,E> Graph<N,E> {
// computation.
pub fn iterate_until_fixed_point<'a, F>(&'a self, mut op: F) where
F: FnMut(uint, EdgeIndex, &'a Edge<E>) -> bool,
F: FnMut(usize, EdgeIndex, &'a Edge<E>) -> bool,
{
let mut iteration = 0;
let mut changed = true;

View File

@ -1766,7 +1766,7 @@ fn lifetimes_in_scope(tcx: &ty::ctxt,
// LifeGiver is responsible for generating fresh lifetime names
struct LifeGiver {
taken: HashSet<String>,
counter: Cell<uint>,
counter: Cell<usize>,
generated: RefCell<Vec<ast::Lifetime>>,
}
@ -1806,7 +1806,7 @@ impl LifeGiver {
return lifetime;
// 0 .. 25 generates a .. z, 26 .. 51 generates aa .. zz, and so on
fn num_to_string(counter: uint) -> String {
fn num_to_string(counter: usize) -> String {
let mut s = String::new();
let (n, r) = (counter/26 + 1, counter % 26);
let letter: char = from_u32((r+97) as u32).unwrap();
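
The comment above describes the name sequence, but the hunk cuts off before the string is built; repeating the letter `n` times below is inferred from that comment, so treat this as a sketch of the scheme rather than the exact `LifeGiver` code:

    use std::char::from_u32;

    // counter 0..25 -> "a".."z", 26..51 -> "aa".."zz", and so on.
    fn num_to_string(counter: usize) -> String {
        let (n, r) = (counter / 26 + 1, counter % 26);
        let letter = from_u32((r + 97) as u32).unwrap();
        (0..n).map(|_| letter).collect()
    }

    fn main() {
        assert_eq!(num_to_string(0), "a");
        assert_eq!(num_to_string(25), "z");
        assert_eq!(num_to_string(26), "aa");
        assert_eq!(num_to_string(51), "zz");
    }
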

View File

@ -811,7 +811,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
ty::mk_var(self.tcx, self.next_ty_var_id(true))
}
pub fn next_ty_vars(&self, n: uint) -> Vec<Ty<'tcx>> {
pub fn next_ty_vars(&self, n: usize) -> Vec<Ty<'tcx>> {
(0..n).map(|_i| self.next_ty_var()).collect()
}

View File

@ -121,7 +121,7 @@ struct ConstraintGraph<'a, 'tcx: 'a> {
tcx: &'a ty::ctxt<'tcx>,
graph_name: String,
map: &'a FnvHashMap<Constraint, SubregionOrigin<'tcx>>,
node_ids: FnvHashMap<Node, uint>,
node_ids: FnvHashMap<Node, usize>,
}
#[derive(Clone, Hash, PartialEq, Eq, Debug, Copy)]

View File

@ -86,7 +86,7 @@ pub enum UndoLogEntry {
CommitedSnapshot,
AddVar(RegionVid),
AddConstraint(Constraint),
AddVerify(uint),
AddVerify(usize),
AddGiven(ty::FreeRegion, ty::RegionVid),
AddCombination(CombineMapType, TwoRegions)
}
@ -224,7 +224,7 @@ pub struct RegionVarBindings<'a, 'tcx: 'a> {
#[derive(Debug)]
pub struct RegionSnapshot {
length: uint,
length: usize,
skolemization_count: u32,
}
@ -284,7 +284,7 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
AddVar(vid) => {
let mut var_origins = self.var_origins.borrow_mut();
var_origins.pop().unwrap();
assert_eq!(var_origins.len(), vid.index as uint);
assert_eq!(var_origins.len(), vid.index as usize);
}
AddConstraint(ref constraint) => {
self.constraints.borrow_mut().remove(constraint);
@ -312,7 +312,7 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
pub fn num_vars(&self) -> u32 {
let len = self.var_origins.borrow().len();
// enforce no overflow
assert!(len as u32 as uint == len);
assert!(len as u32 as usize == len);
len as u32
}
@ -557,7 +557,7 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
match *self.values.borrow() {
None => {
self.tcx.sess.span_bug(
(*self.var_origins.borrow())[rid.index as uint].span(),
(*self.var_origins.borrow())[rid.index as usize].span(),
"attempt to resolve region variable before values have \
been computed!")
}
@ -629,7 +629,7 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
let mut result_set = vec!(r0);
let mut result_index = 0;
while result_index < result_set.len() {
// nb: can't use uint::range() here because result_set grows
// nb: can't use usize::range() here because result_set grows
let r = result_set[result_index];
debug!("result_index={}, r={:?}", result_index, r);
@ -746,7 +746,7 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
(ReInfer(ReVar(v_id)), _) | (_, ReInfer(ReVar(v_id))) => {
self.tcx.sess.span_bug(
(*self.var_origins.borrow())[v_id.index as uint].span(),
(*self.var_origins.borrow())[v_id.index as usize].span(),
&format!("lub_concrete_regions invoked with \
non-concrete regions: {:?}, {:?}",
a,
@ -850,7 +850,7 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
(ReInfer(ReVar(v_id)), _) |
(_, ReInfer(ReVar(v_id))) => {
self.tcx.sess.span_bug(
(*self.var_origins.borrow())[v_id.index as uint].span(),
(*self.var_origins.borrow())[v_id.index as usize].span(),
&format!("glb_concrete_regions invoked with \
non-concrete regions: {:?}, {:?}",
a,
@ -984,7 +984,7 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
}
fn construct_var_data(&self) -> Vec<VarData> {
(0..self.num_vars() as uint).map(|_| {
(0..self.num_vars() as usize).map(|_| {
VarData {
// All nodes are initially classified as contracting; during
// the expansion phase, we will shift the classification for
@ -1013,14 +1013,14 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
.repr(self.tcx));
match *constraint {
ConstrainRegSubVar(a_region, b_vid) => {
let b_data = &mut var_data[b_vid.index as uint];
let b_data = &mut var_data[b_vid.index as usize];
self.expand_node(a_region, b_vid, b_data)
}
ConstrainVarSubVar(a_vid, b_vid) => {
match var_data[a_vid.index as uint].value {
match var_data[a_vid.index as usize].value {
NoValue | ErrorValue => false,
Value(a_region) => {
let b_node = &mut var_data[b_vid.index as uint];
let b_node = &mut var_data[b_vid.index as usize];
self.expand_node(a_region, b_vid, b_node)
}
}
@ -1101,16 +1101,16 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
false
}
ConstrainVarSubVar(a_vid, b_vid) => {
match var_data[b_vid.index as uint].value {
match var_data[b_vid.index as usize].value {
NoValue | ErrorValue => false,
Value(b_region) => {
let a_data = &mut var_data[a_vid.index as uint];
let a_data = &mut var_data[a_vid.index as usize];
self.contract_node(a_vid, a_data, b_region)
}
}
}
ConstrainVarSubReg(a_vid, b_region) => {
let a_data = &mut var_data[a_vid.index as uint];
let a_data = &mut var_data[a_vid.index as usize];
self.contract_node(a_vid, a_data, b_region)
}
}
@ -1250,11 +1250,11 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
// idea is to report errors that derive from independent
// regions of the graph, but not those that derive from
// overlapping locations.
let mut dup_vec: Vec<_> = repeat(u32::MAX).take(self.num_vars() as uint).collect();
let mut dup_vec: Vec<_> = repeat(u32::MAX).take(self.num_vars() as usize).collect();
let mut opt_graph = None;
for idx in 0..self.num_vars() as uint {
for idx in 0..self.num_vars() as usize {
match var_data[idx].value {
Value(_) => {
/* Inference successful */
@ -1311,7 +1311,7 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
}
}
(0..self.num_vars() as uint).map(|idx| var_data[idx].value).collect()
(0..self.num_vars() as usize).map(|idx| var_data[idx].value).collect()
}
fn construct_graph(&self) -> RegionGraph {
@ -1320,7 +1320,7 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
let constraints = self.constraints.borrow();
let num_edges = constraints.len();
let mut graph = graph::Graph::with_capacity(num_vars as uint + 1,
let mut graph = graph::Graph::with_capacity(num_vars as usize + 1,
num_edges);
for _ in 0..num_vars {
@ -1331,17 +1331,17 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
for (constraint, _) in &*constraints {
match *constraint {
ConstrainVarSubVar(a_id, b_id) => {
graph.add_edge(NodeIndex(a_id.index as uint),
NodeIndex(b_id.index as uint),
graph.add_edge(NodeIndex(a_id.index as usize),
NodeIndex(b_id.index as usize),
*constraint);
}
ConstrainRegSubVar(_, b_id) => {
graph.add_edge(dummy_idx,
NodeIndex(b_id.index as uint),
NodeIndex(b_id.index as usize),
*constraint);
}
ConstrainVarSubReg(a_id, _) => {
graph.add_edge(NodeIndex(a_id.index as uint),
graph.add_edge(NodeIndex(a_id.index as usize),
dummy_idx,
*constraint);
}
@ -1395,7 +1395,7 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
debug!("pushing SubSupConflict sub: {:?} sup: {:?}",
lower_bound.region, upper_bound.region);
errors.push(SubSupConflict(
(*self.var_origins.borrow())[node_idx.index as uint].clone(),
(*self.var_origins.borrow())[node_idx.index as usize].clone(),
lower_bound.origin.clone(),
lower_bound.region,
upper_bound.origin.clone(),
@ -1406,7 +1406,7 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
}
self.tcx.sess.span_bug(
(*self.var_origins.borrow())[node_idx.index as uint].span(),
(*self.var_origins.borrow())[node_idx.index as usize].span(),
&format!("collect_error_for_expanding_node() could not find error \
for var {:?}, lower_bounds={}, upper_bounds={}",
node_idx,
@ -1439,7 +1439,7 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
Ok(_) => {}
Err(_) => {
errors.push(SupSupConflict(
(*self.var_origins.borrow())[node_idx.index as uint].clone(),
(*self.var_origins.borrow())[node_idx.index as usize].clone(),
upper_bound_1.origin.clone(),
upper_bound_1.region,
upper_bound_2.origin.clone(),
@ -1451,7 +1451,7 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
}
self.tcx.sess.span_bug(
(*self.var_origins.borrow())[node_idx.index as uint].span(),
(*self.var_origins.borrow())[node_idx.index as usize].span(),
&format!("collect_error_for_contracting_node() could not find error \
for var {:?}, upper_bounds={}",
node_idx,
@ -1485,12 +1485,12 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
while !state.stack.is_empty() {
let node_idx = state.stack.pop().unwrap();
let classification = var_data[node_idx.index as uint].classification;
let classification = var_data[node_idx.index as usize].classification;
// check whether we've visited this node on some previous walk
if dup_vec[node_idx.index as uint] == u32::MAX {
dup_vec[node_idx.index as uint] = orig_node_idx.index;
} else if dup_vec[node_idx.index as uint] != orig_node_idx.index {
if dup_vec[node_idx.index as usize] == u32::MAX {
dup_vec[node_idx.index as usize] = orig_node_idx.index;
} else if dup_vec[node_idx.index as usize] != orig_node_idx.index {
state.dup_found = true;
}
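
The `dup_vec` logic above is sentinel-based visited marking: `u32::MAX` means untouched, and a stamp from a different originating node signals overlapping walks. A simplified sketch that returns a bool instead of setting `state.dup_found`:

    const UNVISITED: u32 = u32::MAX;

    // First walk to reach `node` stamps it with its origin; a later walk
    // that started elsewhere sees a foreign stamp and reports the overlap.
    fn visit(dup_vec: &mut [u32], node: usize, origin: u32) -> bool {
        if dup_vec[node] == UNVISITED {
            dup_vec[node] = origin;
            true
        } else {
            dup_vec[node] == origin
        }
    }

    fn main() {
        let mut dup_vec = vec![UNVISITED; 4];
        assert!(visit(&mut dup_vec, 2, 0));  // walk from node 0 reaches node 2
        assert!(!visit(&mut dup_vec, 2, 1)); // walk from node 1 finds 0's stamp
    }
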
@ -1518,7 +1518,7 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
dir: Direction) {
debug!("process_edges(source_vid={:?}, dir={:?})", source_vid, dir);
let source_node_index = NodeIndex(source_vid.index as uint);
let source_node_index = NodeIndex(source_vid.index as usize);
graph.each_adjacent_edge(source_node_index, dir, |_, edge| {
match edge.data {
ConstrainVarSubVar(from_vid, to_vid) => {
@ -1603,7 +1603,7 @@ fn normalize(values: &Vec<VarValue>, r: ty::Region) -> ty::Region {
}
fn lookup(values: &Vec<VarValue>, rid: ty::RegionVid) -> ty::Region {
match values[rid.index as uint] {
match values[rid.index as usize] {
Value(r) => r,
NoValue => ReEmpty, // No constraints, return ty::ReEmpty
ErrorValue => ReStatic, // Previously reported error.

View File

@ -69,11 +69,11 @@ impl<'tcx> TypeVariableTable<'tcx> {
}
fn relations<'a>(&'a mut self, a: ty::TyVid) -> &'a mut Vec<Relation> {
relations(self.values.get_mut(a.index as uint))
relations(self.values.get_mut(a.index as usize))
}
pub fn var_diverges<'a>(&'a self, vid: ty::TyVid) -> bool {
self.values.get(vid.index as uint).diverging
self.values.get(vid.index as usize).diverging
}
/// Records that `a <: b`, `a :> b`, or `a == b`, depending on `dir`.
@ -97,7 +97,7 @@ impl<'tcx> TypeVariableTable<'tcx> {
stack: &mut Vec<(Ty<'tcx>, RelationDir, ty::TyVid)>)
{
let old_value = {
let value_ptr = &mut self.values.get_mut(vid.index as uint).value;
let value_ptr = &mut self.values.get_mut(vid.index as usize).value;
mem::replace(value_ptr, Known(ty))
};
@ -123,7 +123,7 @@ impl<'tcx> TypeVariableTable<'tcx> {
}
pub fn probe(&self, vid: ty::TyVid) -> Option<Ty<'tcx>> {
match self.values.get(vid.index as uint).value {
match self.values.get(vid.index as usize).value {
Bounded(..) => None,
Known(t) => Some(t)
}
@ -206,12 +206,12 @@ impl<'tcx> sv::SnapshotVecDelegate for Delegate<'tcx> {
action: UndoEntry) {
match action {
SpecifyVar(vid, relations) => {
values[vid.index as uint].value = Bounded(relations);
values[vid.index as usize].value = Bounded(relations);
}
Relate(a, b) => {
relations(&mut (*values)[a.index as uint]).pop();
relations(&mut (*values)[b.index as uint]).pop();
relations(&mut (*values)[a.index as usize]).pop();
relations(&mut (*values)[b.index as usize]).pop();
}
}
}

View File

@ -35,9 +35,9 @@ use util::snapshot_vec as sv;
pub trait UnifyKey : Clone + Debug + PartialEq {
type Value : UnifyValue;
fn index(&self) -> uint;
fn index(&self) -> usize;
fn from_index(u: uint) -> Self;
fn from_index(u: usize) -> Self;
// Given an inference context, returns the unification table
// appropriate to this key type.
@ -67,7 +67,7 @@ pub trait UnifyValue : Clone + PartialEq + Debug {
#[derive(PartialEq,Clone,Debug)]
pub enum VarValue<K:UnifyKey> {
Redirect(K),
Root(K::Value, uint),
Root(K::Value, usize),
}
/// Table of unification keys and their values.
@ -89,7 +89,7 @@ pub struct Snapshot<K:UnifyKey> {
pub struct Node<K:UnifyKey> {
pub key: K,
pub value: K::Value,
pub rank: uint,
pub rank: usize,
}
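
`Root(K::Value, usize)` and `Node::rank` above are the rank half of a union-find, and `unify` returns the surviving key plus its new rank. A minimal sketch of the union-by-rank choice (not the actual `UnificationTable` API):

    // Higher-rank root stays the root; a tie keeps either key but bumps
    // the winner's rank by one.
    fn pick_root(key_a: u32, rank_a: usize, key_b: u32, rank_b: usize) -> (u32, usize) {
        if rank_a > rank_b {
            (key_a, rank_a)
        } else if rank_b > rank_a {
            (key_b, rank_b)
        } else {
            (key_a, rank_a + 1)
        }
    }

    fn main() {
        assert_eq!(pick_root(0, 2, 1, 1), (0, 2));
        assert_eq!(pick_root(0, 1, 1, 1), (0, 2));
    }
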
#[derive(Copy)]
@ -186,7 +186,7 @@ impl<K:UnifyKey> UnificationTable<K> {
tcx: &ty::ctxt<'tcx>,
node_a: &Node<K>,
node_b: &Node<K>)
-> (K, uint)
-> (K, usize)
{
debug!("unify(node_a(id={:?}, rank={:?}), node_b(id={:?}, rank={:?}))",
node_a.key,
@ -358,9 +358,9 @@ impl<'a,'tcx,V,K> InferCtxtMethodsForSimplyUnifiableTypes<'tcx,K,V> for InferCtx
impl UnifyKey for ty::IntVid {
type Value = Option<IntVarValue>;
fn index(&self) -> uint { self.index as uint }
fn index(&self) -> usize { self.index as usize }
fn from_index(i: uint) -> ty::IntVid { ty::IntVid { index: i as u32 } }
fn from_index(i: usize) -> ty::IntVid { ty::IntVid { index: i as u32 } }
fn unification_table<'v>(infcx: &'v InferCtxt) -> &'v RefCell<UnificationTable<ty::IntVid>> {
return &infcx.int_unification_table;
@ -391,9 +391,9 @@ impl UnifyValue for Option<IntVarValue> { }
impl UnifyKey for ty::FloatVid {
type Value = Option<ast::FloatTy>;
fn index(&self) -> uint { self.index as uint }
fn index(&self) -> usize { self.index as usize }
fn from_index(i: uint) -> ty::FloatVid { ty::FloatVid { index: i as u32 } }
fn from_index(i: usize) -> ty::FloatVid { ty::FloatVid { index: i as u32 } }
fn unification_table<'v>(infcx: &'v InferCtxt) -> &'v RefCell<UnificationTable<ty::FloatVid>> {
return &infcx.float_unification_table;

View File

@ -28,8 +28,8 @@ pub fn check_crate(tcx: &ctxt) {
let mut visitor = IntrinsicCheckingVisitor {
tcx: tcx,
param_envs: Vec::new(),
dummy_sized_ty: tcx.types.int,
dummy_unsized_ty: ty::mk_vec(tcx, tcx.types.int, None),
dummy_sized_ty: tcx.types.isize,
dummy_unsized_ty: ty::mk_vec(tcx, tcx.types.isize, None),
};
visit::walk_crate(&mut visitor, tcx.map.krate());
}

View File

@ -70,7 +70,7 @@ impl LanguageItems {
self.items.iter().enumerate()
}
pub fn item_name(index: uint) -> &'static str {
pub fn item_name(index: usize) -> &'static str {
let item: Option<LangItem> = FromPrimitive::from_usize(index);
match item {
$( Some($variant) => $name, )*
@ -79,11 +79,11 @@ impl LanguageItems {
}
pub fn require(&self, it: LangItem) -> Result<ast::DefId, String> {
match self.items[it as uint] {
match self.items[it as usize] {
Some(id) => Ok(id),
None => {
Err(format!("requires `{}` lang_item",
LanguageItems::item_name(it as uint)))
LanguageItems::item_name(it as usize)))
}
}
}
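
`require` above indexes a table with a C-like enum cast to `usize`. A tiny self-contained version of that pattern, with made-up variant names and ids:

    #[derive(Debug, Clone, Copy)]
    enum LangItem { SizedTrait, CopyTrait, DropTrait }

    // A fieldless enum casts to usize and indexes a table of optional entries.
    fn require(items: &[Option<u32>], it: LangItem) -> Result<u32, String> {
        match items[it as usize] {
            Some(id) => Ok(id),
            None => Err(format!("requires `{:?}` lang_item", it)),
        }
    }

    fn main() {
        let items = [Some(10), None, Some(12)];
        assert_eq!(require(&items, LangItem::SizedTrait), Ok(10));
        assert!(require(&items, LangItem::CopyTrait).is_err());
        assert_eq!(require(&items, LangItem::DropTrait), Ok(12));
    }
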
@ -132,7 +132,7 @@ impl LanguageItems {
$(
#[allow(dead_code)]
pub fn $method(&self) -> Option<ast::DefId> {
self.items[$variant as uint]
self.items[$variant as usize]
}
)*
}
@ -142,7 +142,7 @@ struct LanguageItemCollector<'a> {
session: &'a Session,
item_refs: FnvHashMap<&'static str, uint>,
item_refs: FnvHashMap<&'static str, usize>,
}
impl<'a, 'v> Visitor<'v> for LanguageItemCollector<'a> {
@ -163,7 +163,7 @@ impl<'a> LanguageItemCollector<'a> {
pub fn new(session: &'a Session) -> LanguageItemCollector<'a> {
let mut item_refs = FnvHashMap();
$( item_refs.insert($name, $variant as uint); )*
$( item_refs.insert($name, $variant as usize); )*
LanguageItemCollector {
session: session,
@ -172,7 +172,7 @@ impl<'a> LanguageItemCollector<'a> {
}
}
pub fn collect_item(&mut self, item_index: uint,
pub fn collect_item(&mut self, item_index: usize,
item_def_id: ast::DefId, span: Span) {
// Check for duplicates.
match self.items.items[item_index] {

View File

@ -94,7 +94,7 @@ pub enum categorization<'tcx> {
cat_static_item,
cat_upvar(Upvar), // upvar referenced by closure env
cat_local(ast::NodeId), // local variable
cat_deref(cmt<'tcx>, uint, PointerKind), // deref of a ptr
cat_deref(cmt<'tcx>, usize, PointerKind), // deref of a ptr
cat_interior(cmt<'tcx>, InteriorKind), // something interior: field, tuple, etc
cat_downcast(cmt<'tcx>, ast::DefId), // selects a particular enum variant (*1)
@ -135,7 +135,7 @@ pub enum InteriorKind {
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub enum FieldName {
NamedField(ast::Name),
PositionalField(uint)
PositionalField(usize)
}
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
@ -462,7 +462,7 @@ impl<'t,'tcx,TYPER:Typer<'tcx>> MemCategorizationContext<'t,TYPER> {
pub fn cat_expr_autoderefd(&self,
expr: &ast::Expr,
autoderefs: uint)
autoderefs: usize)
-> McResult<cmt<'tcx>> {
let mut cmt = try!(self.cat_expr_unadjusted(expr));
debug!("cat_expr_autoderefd: autoderefs={}, cmt={}",
@ -868,7 +868,7 @@ impl<'t,'tcx,TYPER:Typer<'tcx>> MemCategorizationContext<'t,TYPER> {
pub fn cat_tup_field<N:ast_node>(&self,
node: &N,
base_cmt: cmt<'tcx>,
f_idx: uint,
f_idx: usize,
f_ty: Ty<'tcx>)
-> cmt<'tcx> {
Rc::new(cmt_ {
@ -884,7 +884,7 @@ impl<'t,'tcx,TYPER:Typer<'tcx>> MemCategorizationContext<'t,TYPER> {
fn cat_deref<N:ast_node>(&self,
node: &N,
base_cmt: cmt<'tcx>,
deref_cnt: uint,
deref_cnt: usize,
deref_context: DerefKindContext)
-> McResult<cmt<'tcx>> {
let adjustment = match self.typer.adjustments().borrow().get(&node.id()) {
@ -928,7 +928,7 @@ impl<'t,'tcx,TYPER:Typer<'tcx>> MemCategorizationContext<'t,TYPER> {
fn cat_deref_common<N:ast_node>(&self,
node: &N,
base_cmt: cmt<'tcx>,
deref_cnt: uint,
deref_cnt: usize,
deref_ty: Ty<'tcx>,
deref_context: DerefKindContext,
implicit: bool)

View File

@ -136,7 +136,7 @@ impl DestructionScopeData {
RustcDecodable, Debug, Copy)]
pub struct BlockRemainder {
pub block: ast::NodeId,
pub first_statement_index: uint,
pub first_statement_index: usize,
}
impl CodeExtent {
@ -284,7 +284,7 @@ impl InnermostDeclaringBlock {
struct DeclaringStatementContext {
stmt_id: ast::NodeId,
block_id: ast::NodeId,
stmt_index: uint,
stmt_index: usize,
}
impl DeclaringStatementContext {

View File

@ -98,7 +98,7 @@ impl<'tcx> Substs<'tcx> {
}
pub fn type_for_def(&self, ty_param_def: &ty::TypeParameterDef) -> Ty<'tcx> {
*self.types.get(ty_param_def.space, ty_param_def.index as uint)
*self.types.get(ty_param_def.space, ty_param_def.index as usize)
}
pub fn has_regions_escaping_depth(&self, depth: u32) -> bool {
@ -193,7 +193,7 @@ impl ParamSpace {
[TypeSpace, SelfSpace, FnSpace]
}
pub fn to_uint(self) -> uint {
pub fn to_uint(self) -> usize {
match self {
TypeSpace => 0,
SelfSpace => 1,
@ -201,7 +201,7 @@ impl ParamSpace {
}
}
pub fn from_uint(u: uint) -> ParamSpace {
pub fn from_uint(u: usize) -> ParamSpace {
match u {
0 => TypeSpace,
1 => SelfSpace,
@ -226,8 +226,8 @@ pub struct VecPerParamSpace<T> {
// AF(self) = (self.content[..self.type_limit],
// self.content[self.type_limit..self.self_limit],
// self.content[self.self_limit..])
type_limit: uint,
self_limit: uint,
type_limit: usize,
self_limit: usize,
content: Vec<T>,
}
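
The `AF(self)` comment above documents the three-segment layout that `limits` and `get_slice` rely on. A compact sketch of that layout; the `FnSpace` arm is inferred from `content[self.self_limit..]` in the comment:

    enum ParamSpace { TypeSpace, SelfSpace, FnSpace }

    // One Vec holds the three spaces back-to-back, delimited by two indices.
    struct VecPerParamSpace<T> {
        type_limit: usize,
        self_limit: usize,
        content: Vec<T>,
    }

    impl<T> VecPerParamSpace<T> {
        fn get_slice(&self, space: ParamSpace) -> &[T] {
            let (start, end) = match space {
                ParamSpace::TypeSpace => (0, self.type_limit),
                ParamSpace::SelfSpace => (self.type_limit, self.self_limit),
                ParamSpace::FnSpace => (self.self_limit, self.content.len()),
            };
            &self.content[start..end]
        }
    }

    fn main() {
        let v = VecPerParamSpace { type_limit: 2, self_limit: 3, content: vec!['a', 'b', 'S', 'f'] };
        assert_eq!(v.get_slice(ParamSpace::TypeSpace), &['a', 'b']);
        assert_eq!(v.get_slice(ParamSpace::SelfSpace), &['S']);
        assert_eq!(v.get_slice(ParamSpace::FnSpace), &['f']);
    }
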
@ -251,7 +251,7 @@ impl<T: fmt::Debug> fmt::Debug for VecPerParamSpace<T> {
}
impl<T> VecPerParamSpace<T> {
fn limits(&self, space: ParamSpace) -> (uint, uint) {
fn limits(&self, space: ParamSpace) -> (usize, usize) {
match space {
TypeSpace => (0, self.type_limit),
SelfSpace => (self.type_limit, self.self_limit),
@ -290,7 +290,7 @@ impl<T> VecPerParamSpace<T> {
}
}
fn new_internal(content: Vec<T>, type_limit: uint, self_limit: uint)
fn new_internal(content: Vec<T>, type_limit: usize, self_limit: usize)
-> VecPerParamSpace<T>
{
VecPerParamSpace {
@ -343,7 +343,7 @@ impl<T> VecPerParamSpace<T> {
}
}
pub fn truncate(&mut self, space: ParamSpace, len: uint) {
pub fn truncate(&mut self, space: ParamSpace, len: usize) {
// FIXME (#15435): slow; O(n^2); could enhance vec to make it O(n).
while self.len(space) > len {
self.pop(space);
@ -364,7 +364,7 @@ impl<T> VecPerParamSpace<T> {
if v.len() == 0 { None } else { Some(&v[0]) }
}
pub fn len(&self, space: ParamSpace) -> uint {
pub fn len(&self, space: ParamSpace) -> usize {
self.get_slice(space).len()
}
@ -384,13 +384,13 @@ impl<T> VecPerParamSpace<T> {
pub fn opt_get<'a>(&'a self,
space: ParamSpace,
index: uint)
index: usize)
-> Option<&'a T> {
let v = self.get_slice(space);
if index < v.len() { Some(&v[index]) } else { None }
}
pub fn get<'a>(&'a self, space: ParamSpace, index: uint) -> &'a T {
pub fn get<'a>(&'a self, space: ParamSpace, index: usize) -> &'a T {
&self.get_slice(space)[index]
}
@ -441,7 +441,7 @@ impl<T> VecPerParamSpace<T> {
}
pub fn map_enumerated<U, P>(&self, pred: P) -> VecPerParamSpace<U> where
P: FnMut((ParamSpace, uint, &T)) -> U,
P: FnMut((ParamSpace, usize, &T)) -> U,
{
let result = self.iter_enumerated().map(pred).collect();
VecPerParamSpace::new_internal(result,
@ -487,8 +487,8 @@ impl<T> VecPerParamSpace<T> {
#[derive(Clone)]
pub struct EnumeratedItems<'a,T:'a> {
vec: &'a VecPerParamSpace<T>,
space_index: uint,
elem_index: uint
space_index: usize,
elem_index: usize
}
impl<'a,T> EnumeratedItems<'a,T> {
@ -511,9 +511,9 @@ impl<'a,T> EnumeratedItems<'a,T> {
}
impl<'a,T> Iterator for EnumeratedItems<'a,T> {
type Item = (ParamSpace, uint, &'a T);
type Item = (ParamSpace, usize, &'a T);
fn next(&mut self) -> Option<(ParamSpace, uint, &'a T)> {
fn next(&mut self) -> Option<(ParamSpace, usize, &'a T)> {
let spaces = ParamSpace::all();
if self.space_index < spaces.len() {
let space = spaces[self.space_index];
@ -598,7 +598,7 @@ struct SubstFolder<'a, 'tcx: 'a> {
root_ty: Option<Ty<'tcx>>,
// Depth of type stack
ty_stack_depth: uint,
ty_stack_depth: usize,
// Number of region binders we have passed through while doing the substitution
region_binders_passed: u32,
@ -626,7 +626,7 @@ impl<'a, 'tcx> TypeFolder<'tcx> for SubstFolder<'a, 'tcx> {
match self.substs.regions {
ErasedRegions => ty::ReStatic,
NonerasedRegions(ref regions) =>
match regions.opt_get(space, i as uint) {
match regions.opt_get(space, i as usize) {
Some(&r) => {
self.shift_region_through_binders(r)
}
@ -682,7 +682,7 @@ impl<'a, 'tcx> TypeFolder<'tcx> for SubstFolder<'a, 'tcx> {
impl<'a,'tcx> SubstFolder<'a,'tcx> {
fn ty_for_param(&self, p: ty::ParamTy, source_ty: Ty<'tcx>) -> Ty<'tcx> {
// Look up the type in the substitutions. It really should be in there.
let opt_ty = self.substs.types.opt_get(p.space, p.idx as uint);
let opt_ty = self.substs.types.opt_get(p.space, p.idx as usize);
let ty = match opt_ty {
Some(t) => *t,
None => {

View File

@ -54,7 +54,7 @@ pub struct FulfillmentContext<'tcx> {
// Remembers the count of trait obligations that we have already
// attempted to select. This is used to avoid repeating work
// when `select_new_obligations` is called.
attempted_mark: uint,
attempted_mark: usize,
// A set of constraints that regionck must validate. Each
// constraint has the form `T:'a`, meaning "some type `T` must

View File

@ -68,7 +68,7 @@ mod util;
#[derive(Clone, PartialEq, Eq)]
pub struct Obligation<'tcx, T> {
pub cause: ObligationCause<'tcx>,
pub recursion_depth: uint,
pub recursion_depth: usize,
pub predicate: T,
}
@ -482,7 +482,7 @@ impl<'tcx,O> Obligation<'tcx,O> {
}
fn with_depth(cause: ObligationCause<'tcx>,
recursion_depth: uint,
recursion_depth: usize,
trait_ref: O)
-> Obligation<'tcx, O>
{

View File

@ -197,7 +197,7 @@ pub fn normalize<'a,'b,'tcx,T>(selcx: &'a mut SelectionContext<'b,'tcx>,
/// As `normalize`, but with a custom depth.
pub fn normalize_with_depth<'a,'b,'tcx,T>(selcx: &'a mut SelectionContext<'b,'tcx>,
cause: ObligationCause<'tcx>,
depth: uint,
depth: usize,
value: &T)
-> Normalized<'tcx, T>
where T : TypeFoldable<'tcx> + HasProjectionTypes + Clone + Repr<'tcx>
@ -214,13 +214,13 @@ struct AssociatedTypeNormalizer<'a,'b:'a,'tcx:'b> {
selcx: &'a mut SelectionContext<'b,'tcx>,
cause: ObligationCause<'tcx>,
obligations: Vec<PredicateObligation<'tcx>>,
depth: uint,
depth: usize,
}
impl<'a,'b,'tcx> AssociatedTypeNormalizer<'a,'b,'tcx> {
fn new(selcx: &'a mut SelectionContext<'b,'tcx>,
cause: ObligationCause<'tcx>,
depth: uint)
depth: usize)
-> AssociatedTypeNormalizer<'a,'b,'tcx>
{
AssociatedTypeNormalizer {
@ -314,7 +314,7 @@ pub fn normalize_projection_type<'a,'b,'tcx>(
selcx: &'a mut SelectionContext<'b,'tcx>,
projection_ty: ty::ProjectionTy<'tcx>,
cause: ObligationCause<'tcx>,
depth: uint)
depth: usize)
-> NormalizedTy<'tcx>
{
opt_normalize_projection_type(selcx, projection_ty.clone(), cause.clone(), depth)
@ -344,7 +344,7 @@ fn opt_normalize_projection_type<'a,'b,'tcx>(
selcx: &'a mut SelectionContext<'b,'tcx>,
projection_ty: ty::ProjectionTy<'tcx>,
cause: ObligationCause<'tcx>,
depth: uint)
depth: usize)
-> Option<NormalizedTy<'tcx>>
{
debug!("normalize_projection_type(\
@ -412,7 +412,7 @@ fn opt_normalize_projection_type<'a,'b,'tcx>(
fn normalize_to_error<'a,'tcx>(selcx: &mut SelectionContext<'a,'tcx>,
projection_ty: ty::ProjectionTy<'tcx>,
cause: ObligationCause<'tcx>,
depth: uint)
depth: usize)
-> NormalizedTy<'tcx>
{
let trait_ref = projection_ty.trait_ref.to_poly_trait_ref();
@ -699,10 +699,10 @@ fn assemble_candidates_from_impls<'cx,'tcx>(
// But wait, you say! What about an example like this:
//
// ```
// fn bar<T:SomeTrait<Foo=uint>>(...) { ... }
// fn bar<T:SomeTrait<Foo=usize>>(...) { ... }
// ```
//
// Doesn't the `T : Sometrait<Foo=uint>` predicate help
// Doesn't the `T : Sometrait<Foo=usize>` predicate help
// resolve `T::Foo`? And of course it does, but in fact
// that single predicate is desugared into two predicates
// in the compiler: a trait predicate (`T : SomeTrait`) and a
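
The comment above is cut off by the hunk boundary; for orientation, here is a compilable illustration of the desugaring it describes, reusing the `SomeTrait`/`Foo` names from the comment (the `char` impl is invented for the example):

    trait SomeTrait { type Foo; }

    // The single bound `T: SomeTrait<Foo = usize>` splits internally into a
    // trait predicate `T: SomeTrait` and a projection predicate
    // `<T as SomeTrait>::Foo == usize`, which is what lets the body below
    // return a plain usize where the projection type is expected.
    fn bar<T: SomeTrait<Foo = usize>>(_t: T) -> <T as SomeTrait>::Foo {
        0usize
    }

    impl SomeTrait for char { type Foo = usize; }

    fn main() {
        assert_eq!(bar('x'), 0);
    }
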

View File

@ -110,7 +110,7 @@ pub enum MethodMatchedData {
/// The selection process begins by considering all impls, where
/// clauses, and so forth that might resolve an obligation. Sometimes
/// we'll be able to say definitively that (e.g.) an impl does not
/// apply to the obligation: perhaps it is defined for `uint` but the
/// apply to the obligation: perhaps it is defined for `usize` but the
/// obligation is for `int`. In that case, we drop the impl out of the
/// list. But the other cases are considered *candidates*.
///
@ -627,7 +627,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
// for example, we are looking for $0:Eq where $0 is some
// unconstrained type variable. In that case, we'll get a
// candidate which assumes $0 == int, one that assumes $0 ==
// uint, etc. This spells an ambiguity.
// usize, etc. This spells an ambiguity.
// If there is more than one candidate, first winnow them down
// by considering extra conditions (nested obligations and so
@ -2010,7 +2010,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
impl_def_id: ast::DefId,
substs: Normalized<'tcx, Substs<'tcx>>,
cause: ObligationCause<'tcx>,
recursion_depth: uint,
recursion_depth: usize,
skol_map: infer::SkolemizationMap,
snapshot: &infer::CombinedSnapshot)
-> VtableImplData<'tcx, PredicateObligation<'tcx>>
@ -2142,9 +2142,9 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
///
/// impl Fn(int) for Closure { ... }
///
/// Now imagine our obligation is `Fn(uint) for Closure`. So far
/// Now imagine our obligation is `Fn(usize) for Closure`. So far
/// we have matched the self-type `Closure`. At this point we'll
/// compare the `int` to `uint` and generate an error.
/// compare the `int` to `usize` and generate an error.
///
/// Note that this checking occurs *after* the impl has selected,
/// because these output type parameters should not affect the
@ -2441,7 +2441,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
/// impl.
fn impl_or_trait_obligations(&mut self,
cause: ObligationCause<'tcx>,
recursion_depth: uint,
recursion_depth: usize,
def_id: ast::DefId, // of impl or trait
substs: &Substs<'tcx>, // for impl or trait
skol_map: infer::SkolemizationMap,

View File

@ -278,7 +278,7 @@ impl<'tcx> fmt::Debug for super::VtableObjectData<'tcx> {
/// See `super::obligations_for_generics`
pub fn predicates_for_generics<'tcx>(tcx: &ty::ctxt<'tcx>,
cause: ObligationCause<'tcx>,
recursion_depth: uint,
recursion_depth: usize,
generic_bounds: &ty::InstantiatedPredicates<'tcx>)
-> VecPerParamSpace<PredicateObligation<'tcx>>
{
@ -316,7 +316,7 @@ pub fn trait_ref_for_builtin_bound<'tcx>(
pub fn predicate_for_trait_ref<'tcx>(
cause: ObligationCause<'tcx>,
trait_ref: Rc<ty::TraitRef<'tcx>>,
recursion_depth: uint)
recursion_depth: usize)
-> Result<PredicateObligation<'tcx>, ErrorReported>
{
Ok(Obligation {
@ -330,7 +330,7 @@ pub fn predicate_for_trait_def<'tcx>(
tcx: &ty::ctxt<'tcx>,
cause: ObligationCause<'tcx>,
trait_def_id: ast::DefId,
recursion_depth: uint,
recursion_depth: usize,
param_ty: Ty<'tcx>)
-> Result<PredicateObligation<'tcx>, ErrorReported>
{
@ -345,7 +345,7 @@ pub fn predicate_for_builtin_bound<'tcx>(
tcx: &ty::ctxt<'tcx>,
cause: ObligationCause<'tcx>,
builtin_bound: ty::BuiltinBound,
recursion_depth: uint,
recursion_depth: usize,
param_ty: Ty<'tcx>)
-> Result<PredicateObligation<'tcx>, ErrorReported>
{
@ -377,7 +377,7 @@ pub fn upcast<'tcx>(tcx: &ty::ctxt<'tcx>,
pub fn get_vtable_index_of_object_method<'tcx>(tcx: &ty::ctxt<'tcx>,
object_trait_ref: ty::PolyTraitRef<'tcx>,
trait_def_id: ast::DefId,
method_offset_in_trait: uint) -> uint {
method_offset_in_trait: usize) -> usize {
// We need to figure the "real index" of the method in a
// listing of all the methods of an object. We do this by
// iterating down the supertraits of the object's trait until

View File

@ -261,8 +261,8 @@ pub struct field_ty {
#[derive(Copy, PartialEq, Eq, Hash)]
pub struct creader_cache_key {
pub cnum: CrateNum,
pub pos: uint,
pub len: uint
pub pos: usize,
pub len: usize
}
#[derive(Clone, PartialEq, RustcDecodable, RustcEncodable)]
@ -288,18 +288,18 @@ pub enum AutoAdjustment<'tcx> {
#[derive(Clone, PartialEq, Debug)]
pub enum UnsizeKind<'tcx> {
// [T, ..n] -> [T], the uint field is n.
UnsizeLength(uint),
// [T, ..n] -> [T], the usize field is n.
UnsizeLength(usize),
// An unsize coercion applied to the tail field of a struct.
// The uint is the index of the type parameter which is unsized.
UnsizeStruct(Box<UnsizeKind<'tcx>>, uint),
// The usize is the index of the type parameter which is unsized.
UnsizeStruct(Box<UnsizeKind<'tcx>>, usize),
UnsizeVtable(TyTrait<'tcx>, /* the self type of the trait */ Ty<'tcx>),
UnsizeUpcast(Ty<'tcx>),
}
#[derive(Clone, Debug)]
pub struct AutoDerefRef<'tcx> {
pub autoderefs: uint,
pub autoderefs: usize,
pub autoref: Option<AutoRef<'tcx>>
}
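
Both coercions named by `UnsizeLength` and `UnsizeStruct` can be exercised from safe code; a short sketch (the wrapper struct is made up for the example):

    struct Wrapper<T: ?Sized> { value: T }

    fn main() {
        // UnsizeLength(3): [i32; 3] -> [i32]
        let arr: [i32; 3] = [1, 2, 3];
        let slice: &[i32] = &arr;
        assert_eq!(slice.len(), 3);

        // UnsizeStruct: Wrapper<[i32; 3]> -> Wrapper<[i32]>, unsizing the
        // type parameter that appears in the tail field
        let sized: Box<Wrapper<[i32; 3]>> = Box::new(Wrapper { value: [1, 2, 3] });
        let unsized_box: Box<Wrapper<[i32]>> = sized;
        assert_eq!(unsized_box.value.len(), 3);
    }
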
@ -423,7 +423,7 @@ pub fn type_of_adjust<'tcx>(cx: &ctxt<'tcx>, adj: &AutoAdjustment<'tcx>) -> Opti
#[derive(Clone, Copy, RustcEncodable, RustcDecodable, PartialEq, PartialOrd, Debug)]
pub struct param_index {
pub space: subst::ParamSpace,
pub index: uint
pub index: usize
}
#[derive(Clone, Debug)]
@ -452,10 +452,10 @@ pub struct MethodParam<'tcx> {
// instantiated with fresh variables at this point.
pub trait_ref: Rc<ty::TraitRef<'tcx>>,
// index of uint in the list of trait items. Note that this is NOT
// index of usize in the list of trait items. Note that this is NOT
// the index into the vtable, because the list of trait items
// includes associated types.
pub method_num: uint,
pub method_num: usize,
/// The impl for the trait from which the method comes. This
/// should only be used for certain linting/heuristic purposes
@ -474,13 +474,13 @@ pub struct MethodObject<'tcx> {
pub object_trait_id: ast::DefId,
// index of the method to be invoked amongst the trait's items
pub method_num: uint,
pub method_num: usize,
// index into the actual runtime vtable.
// the vtable is formed by concatenating together the method lists of
// the base object trait and all supertraits; this is the index into
// that vtable
pub vtable_index: uint,
pub vtable_index: usize,
}
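
The vtable concatenation described here is what makes supertrait methods callable through an object of the subtrait; a surface-level sketch in current syntax, with invented trait names:

    trait Base { fn base_method(&self) -> usize; }
    trait Sub: Base { fn sub_method(&self) -> usize; }

    struct S;
    impl Base for S { fn base_method(&self) -> usize { 1 } }
    impl Sub for S { fn sub_method(&self) -> usize { 2 } }

    fn main() {
        // The single vtable behind `dyn Sub` lists Base's methods followed by
        // Sub's own, so `base_method` has one index within `Base` and a
        // different index into the full object vtable.
        let obj: &dyn Sub = &S;
        assert_eq!(obj.base_method() + obj.sub_method(), 3);
    }
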
#[derive(Clone)]
@ -511,7 +511,7 @@ pub struct MethodCall {
#[derive(Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable, Copy)]
pub enum ExprAdjustment {
NoAdjustment,
AutoDeref(uint),
AutoDeref(usize),
AutoObject
}
@ -530,7 +530,7 @@ impl MethodCall {
}
}
pub fn autoderef(expr_id: ast::NodeId, autoderef: uint) -> MethodCall {
pub fn autoderef(expr_id: ast::NodeId, autoderef: usize) -> MethodCall {
MethodCall {
expr_id: expr_id,
adjustment: AutoDeref(1 + autoderef)
@ -564,7 +564,7 @@ pub enum vtable_origin<'tcx> {
The first argument is the param index (identifying T in the example),
and the second is the bound number (identifying baz)
*/
vtable_param(param_index, uint),
vtable_param(param_index, usize),
/*
Vtable automatically generated for a closure. The def ID is the
@ -639,12 +639,12 @@ impl<'tcx> CtxtArenas<'tcx> {
pub struct CommonTypes<'tcx> {
pub bool: Ty<'tcx>,
pub char: Ty<'tcx>,
pub int: Ty<'tcx>,
pub isize: Ty<'tcx>,
pub i8: Ty<'tcx>,
pub i16: Ty<'tcx>,
pub i32: Ty<'tcx>,
pub i64: Ty<'tcx>,
pub uint: Ty<'tcx>,
pub usize: Ty<'tcx>,
pub u8: Ty<'tcx>,
pub u16: Ty<'tcx>,
pub u32: Ty<'tcx>,
@ -877,10 +877,10 @@ macro_rules! sty_debug_print {
use middle::ty;
#[derive(Copy)]
struct DebugStat {
total: uint,
region_infer: uint,
ty_infer: uint,
both_infer: uint,
total: usize,
region_infer: usize,
ty_infer: usize,
both_infer: usize,
}
pub fn go(tcx: &ty::ctxt) {
@ -1024,7 +1024,7 @@ pub fn type_has_late_bound_regions(ty: Ty) -> bool {
///
/// So, for example, consider a type like the following, which has two binders:
///
/// for<'a> fn(x: for<'b> fn(&'a int, &'b int))
/// for<'a> fn(x: for<'b> fn(&'a isize, &'b isize))
/// ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ outer scope
/// ^~~~~~~~~~~~~~~~~~~~~~~~~~~~ inner scope
///
@ -1110,7 +1110,7 @@ impl<'tcx> PolyFnSig<'tcx> {
pub fn inputs(&self) -> ty::Binder<Vec<Ty<'tcx>>> {
ty::Binder(self.0.inputs.clone())
}
pub fn input(&self, index: uint) -> ty::Binder<Ty<'tcx>> {
pub fn input(&self, index: usize) -> ty::Binder<Ty<'tcx>> {
ty::Binder(self.0.inputs[index])
}
pub fn output(&self) -> ty::Binder<FnOutput<'tcx>> {
@ -1132,7 +1132,7 @@ pub struct ParamTy {
/// regions (and perhaps later types) in a higher-ranked setting. In
/// particular, imagine a type like this:
///
/// for<'a> fn(for<'b> fn(&'b int, &'a int), &'a char)
/// for<'a> fn(for<'b> fn(&'b isize, &'a isize), &'a char)
/// ^ ^ | | |
/// | | | | |
/// | +------------+ 1 | |
@ -1149,11 +1149,11 @@ pub struct ParamTy {
/// count the number of binders, inside out. Some examples should help
/// clarify what I mean.
///
/// Let's start with the reference type `&'b int` that is the first
/// Let's start with the reference type `&'b isize` that is the first
/// argument to the inner function. This region `'b` is assigned a De
/// Bruijn index of 1, meaning "the innermost binder" (in this case, a
/// fn). The region `'a` that appears in the second argument type (`&'a
/// int`) would then be assigned a De Bruijn index of 2, meaning "the
/// isize`) would then be assigned a De Bruijn index of 2, meaning "the
/// second-innermost binder". (These indices are written on the arrays
/// in the diagram).
///
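
The nested-binder shape these comments describe can be written out and run; a compilable sketch in which the outer binder belongs to `call`'s own signature and the inner one to its fn-pointer argument:

    type Inner<'a> = for<'b> fn(&'a isize, &'b isize) -> isize;

    fn add(x: &isize, y: &isize) -> isize { *x + *y }

    fn call<'a>(f: Inner<'a>, x: &'a isize) -> isize {
        let y = 2;
        f(x, &y) // `'b` is instantiated here; `'a` was chosen by our caller
    }

    fn main() {
        let one = 1;
        assert_eq!(call(add, &one), 3);
    }
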
@ -1234,14 +1234,14 @@ pub enum BorrowKind {
/// implicit closure bindings. It is needed when you the closure
/// is borrowing or mutating a mutable referent, e.g.:
///
/// let x: &mut int = ...;
/// let x: &mut isize = ...;
/// let y = || *x += 5;
///
/// If we were to try to translate this closure into a more explicit
/// form, we'd encounter an error with the code as written:
///
/// struct Env { x: & &mut int }
/// let x: &mut int = ...;
/// struct Env { x: & &mut isize }
/// let x: &mut isize = ...;
/// let y = (&mut Env { &x }, fn_ptr); // Closure is pair of env and fn
/// fn fn_ptr(env: &mut Env) { **env.x += 5; }
///
@ -1249,8 +1249,8 @@ pub enum BorrowKind {
/// in an aliasable location. To solve, you'd have to translate with
/// an `&mut` borrow:
///
/// struct Env { x: & &mut int }
/// let x: &mut int = ...;
/// struct Env { x: & &mut isize }
/// let x: &mut isize = ...;
/// let y = (&mut Env { &mut x }, fn_ptr); // changed from &x to &mut x
/// fn fn_ptr(env: &mut Env) { **env.x += 5; }
///
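
A variant of that desugaring which does compile, using the `&mut` borrow of `x` that the comment calls for (types spelled out for clarity):

    struct Env<'a, 'b> { x: &'a mut &'b mut isize }

    fn fn_ptr(env: &mut Env<'_, '_>) { **env.x += 5; }

    fn main() {
        let mut value: isize = 10;
        let mut x: &mut isize = &mut value;

        {
            // What `let y = || *x += 5;` captures, made explicit: a unique
            // (`&mut`) borrow of the variable `x` itself.
            let mut env = Env { x: &mut x };
            fn_ptr(&mut env);
        }

        assert_eq!(*x, 15);
    }
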
@ -1361,7 +1361,7 @@ pub enum sty<'tcx> {
ty_enum(DefId, &'tcx Substs<'tcx>),
ty_uniq(Ty<'tcx>),
ty_str,
ty_vec(Ty<'tcx>, Option<uint>), // Second field is length.
ty_vec(Ty<'tcx>, Option<usize>), // Second field is length.
ty_ptr(mt<'tcx>),
ty_rptr(&'tcx Region, mt<'tcx>),
@ -1491,7 +1491,7 @@ impl<'tcx> PolyTraitRef<'tcx> {
}
/// Binder is a binder for higher-ranked lifetimes. It is part of the
/// compiler's representation for things like `for<'a> Fn(&'a int)`
/// compiler's representation for things like `for<'a> Fn(&'a isize)`
/// (which would be represented by the type `PolyTraitRef ==
/// Binder<TraitRef>`). Note that when we skolemize, instantiate,
/// erase, or otherwise "discharge" these bound regions, we change the
@ -1552,9 +1552,9 @@ pub enum type_err<'tcx> {
terr_ptr_mutability,
terr_ref_mutability,
terr_vec_mutability,
terr_tuple_size(expected_found<uint>),
terr_fixed_array_size(expected_found<uint>),
terr_ty_param_size(expected_found<uint>),
terr_tuple_size(expected_found<usize>),
terr_fixed_array_size(expected_found<usize>),
terr_ty_param_size(expected_found<usize>),
terr_arg_count,
terr_regions_does_not_outlive(Region, Region),
terr_regions_not_same(Region, Region),
@ -1571,7 +1571,7 @@ pub enum type_err<'tcx> {
terr_cyclic_ty,
terr_convergence_mismatch(expected_found<bool>),
terr_projection_name_mismatched(expected_found<ast::Name>),
terr_projection_bounds_length(expected_found<uint>),
terr_projection_bounds_length(expected_found<usize>),
}
/// Bounds suitable for a named type parameter like `A` in `fn foo<A>`
@ -1600,7 +1600,7 @@ pub type BuiltinBounds = EnumSet<BuiltinBound>;
#[derive(Clone, RustcEncodable, PartialEq, Eq, RustcDecodable, Hash,
Debug, Copy)]
#[repr(uint)]
#[repr(usize)]
pub enum BuiltinBound {
BoundSend,
BoundSized,
@ -1628,10 +1628,10 @@ pub fn region_existential_bound<'tcx>(r: ty::Region) -> ExistentialBounds<'tcx>
}
impl CLike for BuiltinBound {
fn to_usize(&self) -> uint {
*self as uint
fn to_usize(&self) -> usize {
*self as usize
}
fn from_usize(v: uint) -> BuiltinBound {
fn from_usize(v: usize) -> BuiltinBound {
unsafe { mem::transmute(v) }
}
}
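
A stand-alone illustration of the enum-to-`usize` mapping that `#[repr(usize)]` and this `CLike` impl rely on, using a checked match instead of `transmute`:

    #[repr(usize)]
    #[derive(Clone, Copy, PartialEq, Debug)]
    enum Bound { Send = 0, Sized = 1, Copy = 2, Sync = 3 }

    fn to_usize(b: Bound) -> usize { b as usize }

    fn from_usize(v: usize) -> Option<Bound> {
        match v {
            0 => Some(Bound::Send),
            1 => Some(Bound::Sized),
            2 => Some(Bound::Copy),
            3 => Some(Bound::Sync),
            _ => None,
        }
    }

    fn main() {
        assert_eq!(to_usize(Bound::Copy), 2);
        assert_eq!(from_usize(3), Some(Bound::Sync));
    }
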
@ -2202,8 +2202,8 @@ impl<'tcx> Predicate<'tcx> {
///
/// Here, the `GenericPredicates` for `Foo` would contain a list of bounds like
/// `[[], [U:Bar<T>]]`. Now if there were some particular reference
/// like `Foo<int,uint>`, then the `InstantiatedPredicates` would be `[[],
/// [uint:Bar<int>]]`.
/// like `Foo<isize,usize>`, then the `InstantiatedPredicates` would be `[[],
/// [usize:Bar<isize>]]`.
#[derive(Clone, Debug)]
pub struct InstantiatedPredicates<'tcx> {
pub predicates: VecPerParamSpace<Predicate<'tcx>>,
@ -2545,12 +2545,12 @@ impl<'tcx> CommonTypes<'tcx> {
bool: intern_ty(arena, interner, ty_bool),
char: intern_ty(arena, interner, ty_char),
err: intern_ty(arena, interner, ty_err),
int: intern_ty(arena, interner, ty_int(ast::TyIs)),
isize: intern_ty(arena, interner, ty_int(ast::TyIs)),
i8: intern_ty(arena, interner, ty_int(ast::TyI8)),
i16: intern_ty(arena, interner, ty_int(ast::TyI16)),
i32: intern_ty(arena, interner, ty_int(ast::TyI32)),
i64: intern_ty(arena, interner, ty_int(ast::TyI64)),
uint: intern_ty(arena, interner, ty_uint(ast::TyUs)),
usize: intern_ty(arena, interner, ty_uint(ast::TyUs)),
u8: intern_ty(arena, interner, ty_uint(ast::TyU8)),
u16: intern_ty(arena, interner, ty_uint(ast::TyU16)),
u32: intern_ty(arena, interner, ty_uint(ast::TyU32)),
@ -2935,7 +2935,7 @@ impl FlagComputation {
pub fn mk_mach_int<'tcx>(tcx: &ctxt<'tcx>, tm: ast::IntTy) -> Ty<'tcx> {
match tm {
ast::TyIs => tcx.types.int,
ast::TyIs => tcx.types.isize,
ast::TyI8 => tcx.types.i8,
ast::TyI16 => tcx.types.i16,
ast::TyI32 => tcx.types.i32,
@ -2945,7 +2945,7 @@ pub fn mk_mach_int<'tcx>(tcx: &ctxt<'tcx>, tm: ast::IntTy) -> Ty<'tcx> {
pub fn mk_mach_uint<'tcx>(tcx: &ctxt<'tcx>, tm: ast::UintTy) -> Ty<'tcx> {
match tm {
ast::TyUs => tcx.types.uint,
ast::TyUs => tcx.types.usize,
ast::TyU8 => tcx.types.u8,
ast::TyU16 => tcx.types.u16,
ast::TyU32 => tcx.types.u32,
@ -3004,7 +3004,7 @@ pub fn mk_nil_ptr<'tcx>(cx: &ctxt<'tcx>) -> Ty<'tcx> {
mk_ptr(cx, mt {ty: mk_nil(cx), mutbl: ast::MutImmutable})
}
pub fn mk_vec<'tcx>(cx: &ctxt<'tcx>, ty: Ty<'tcx>, sz: Option<uint>) -> Ty<'tcx> {
pub fn mk_vec<'tcx>(cx: &ctxt<'tcx>, ty: Ty<'tcx>, sz: Option<usize>) -> Ty<'tcx> {
mk_t(cx, ty_vec(ty, sz))
}
@ -3130,9 +3130,9 @@ impl<'tcx> TyS<'tcx> {
/// structs or variants. For example:
///
/// ```notrust
/// int => { int }
/// Foo<Bar<int>> => { Foo<Bar<int>>, Bar<int>, int }
/// [int] => { [int], int }
/// isize => { isize }
/// Foo<Bar<isize>> => { Foo<Bar<isize>>, Bar<isize>, isize }
/// [isize] => { [isize], isize }
/// ```
pub fn walk(&'tcx self) -> TypeWalker<'tcx> {
TypeWalker::new(self)
@ -3143,9 +3143,9 @@ impl<'tcx> TyS<'tcx> {
/// example:
///
/// ```notrust
/// int => { }
/// Foo<Bar<int>> => { Bar<int>, int }
/// [int] => { int }
/// isize => { }
/// Foo<Bar<isize>> => { Bar<isize>, isize }
/// [isize] => { isize }
/// ```
pub fn walk_children(&'tcx self) -> TypeWalker<'tcx> {
// Walks type reachable from `self` but not `self
@ -3343,7 +3343,7 @@ pub fn simd_type<'tcx>(cx: &ctxt<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> {
}
}
pub fn simd_size(cx: &ctxt, ty: Ty) -> uint {
pub fn simd_size(cx: &ctxt, ty: Ty) -> usize {
match ty.sty {
ty_struct(did, _) => {
let fields = lookup_struct_fields(cx, did);
@ -3611,7 +3611,7 @@ pub fn type_contents<'tcx>(cx: &ctxt<'tcx>, ty: Ty<'tcx>) -> TypeContents {
cache.insert(ty, TC::None);
let result = match ty.sty {
// uint and int are ffi-unsafe
// usize and isize are ffi-unsafe
ty_uint(ast::TyUs) | ty_int(ast::TyIs) => {
TC::ReachesFfiUnsafe
}
@ -4292,7 +4292,7 @@ pub fn array_element_ty<'tcx>(tcx: &ctxt<'tcx>, ty: Ty<'tcx>) -> Option<Ty<'tcx>
/// For an enum `t`, `variant` is None only if `t` is a univariant enum.
pub fn positional_element_ty<'tcx>(cx: &ctxt<'tcx>,
ty: Ty<'tcx>,
i: uint,
i: usize,
variant: Option<ast::DefId>) -> Option<Ty<'tcx>> {
match (&ty.sty, variant) {
@ -4468,8 +4468,8 @@ pub fn pat_ty_opt<'tcx>(cx: &ctxt<'tcx>, pat: &ast::Pat) -> Option<Ty<'tcx>> {
// adjustments. See `expr_ty_adjusted()` instead.
//
// NB (2): This type doesn't provide type parameter substitutions; e.g. if you
// ask for the type of "id" in "id(3)", it will return "fn(&int) -> int"
// instead of "fn(ty) -> T with T = int".
// ask for the type of "id" in "id(3)", it will return "fn(&isize) -> isize"
// instead of "fn(ty) -> T with T = isize".
pub fn expr_ty<'tcx>(cx: &ctxt<'tcx>, expr: &ast::Expr) -> Ty<'tcx> {
return node_id_to_type(cx, expr.id);
}
@ -4879,7 +4879,7 @@ pub fn stmt_node_id(s: &ast::Stmt) -> ast::NodeId {
}
pub fn field_idx_strict(tcx: &ctxt, name: ast::Name, fields: &[field])
-> uint {
-> usize {
let mut i = 0;
for f in fields { if f.name == name { return i; } i += 1; }
tcx.sess.bug(&format!(
@ -4891,7 +4891,7 @@ pub fn field_idx_strict(tcx: &ctxt, name: ast::Name, fields: &[field])
}
pub fn impl_or_trait_item_idx(id: ast::Name, trait_items: &[ImplOrTraitItem])
-> Option<uint> {
-> Option<usize> {
trait_items.iter().position(|m| m.name() == id)
}
@ -5163,7 +5163,7 @@ fn lookup_locally_or_in_crate_store<V, F>(descr: &str,
v
}
pub fn trait_item<'tcx>(cx: &ctxt<'tcx>, trait_did: ast::DefId, idx: uint)
pub fn trait_item<'tcx>(cx: &ctxt<'tcx>, trait_did: ast::DefId, idx: usize)
-> ImplOrTraitItem<'tcx> {
let method_def_id = (*ty::trait_item_def_ids(cx, trait_did))[idx].def_id();
impl_or_trait_item(cx, method_def_id)
@ -5238,10 +5238,10 @@ pub fn is_associated_type(cx: &ctxt, id: ast::DefId) -> bool {
pub fn associated_type_parameter_index(cx: &ctxt,
trait_def: &TraitDef,
associated_type_id: ast::DefId)
-> uint {
-> usize {
for type_parameter_def in trait_def.generics.types.iter() {
if type_parameter_def.def_id == associated_type_id {
return type_parameter_def.index as uint
return type_parameter_def.index as usize
}
}
cx.sess.bug("couldn't find associated type parameter index")
@ -5794,24 +5794,24 @@ pub fn closure_upvars<'tcx>(typer: &mc::Typer<'tcx>,
pub fn is_binopable<'tcx>(cx: &ctxt<'tcx>, ty: Ty<'tcx>, op: ast::BinOp) -> bool {
#![allow(non_upper_case_globals)]
const tycat_other: int = 0;
const tycat_bool: int = 1;
const tycat_char: int = 2;
const tycat_int: int = 3;
const tycat_float: int = 4;
const tycat_raw_ptr: int = 6;
const tycat_other: isize = 0;
const tycat_bool: isize = 1;
const tycat_char: isize = 2;
const tycat_int: isize = 3;
const tycat_float: isize = 4;
const tycat_raw_ptr: isize = 6;
const opcat_add: int = 0;
const opcat_sub: int = 1;
const opcat_mult: int = 2;
const opcat_shift: int = 3;
const opcat_rel: int = 4;
const opcat_eq: int = 5;
const opcat_bit: int = 6;
const opcat_logic: int = 7;
const opcat_mod: int = 8;
const opcat_add: isize = 0;
const opcat_sub: isize = 1;
const opcat_mult: isize = 2;
const opcat_shift: isize = 3;
const opcat_rel: isize = 4;
const opcat_eq: isize = 5;
const opcat_bit: isize = 6;
const opcat_logic: isize = 7;
const opcat_mod: isize = 8;
fn opcat(op: ast::BinOp) -> int {
fn opcat(op: ast::BinOp) -> isize {
match op.node {
ast::BiAdd => opcat_add,
ast::BiSub => opcat_sub,
@ -5834,7 +5834,7 @@ pub fn is_binopable<'tcx>(cx: &ctxt<'tcx>, ty: Ty<'tcx>, op: ast::BinOp) -> bool
}
}
fn tycat<'tcx>(cx: &ctxt<'tcx>, ty: Ty<'tcx>) -> int {
fn tycat<'tcx>(cx: &ctxt<'tcx>, ty: Ty<'tcx>) -> isize {
if type_is_simd(cx, ty) {
return tycat(cx, simd_type(cx, ty))
}
@ -5856,21 +5856,21 @@ pub fn is_binopable<'tcx>(cx: &ctxt<'tcx>, ty: Ty<'tcx>, op: ast::BinOp) -> bool
/*other*/ [f, f, f, f, f, f, f, f, f],
/*bool*/ [f, f, f, f, t, t, t, t, f],
/*char*/ [f, f, f, f, t, t, f, f, f],
/*int*/ [t, t, t, t, t, t, t, f, t],
/*isize*/ [t, t, t, t, t, t, t, f, t],
/*float*/ [t, t, t, f, t, t, f, f, f],
/*bot*/ [t, t, t, t, t, t, t, t, t],
/*raw ptr*/ [f, f, f, f, t, t, f, f, f]];
return tbl[tycat(cx, ty) as uint ][opcat(op) as uint];
return tbl[tycat(cx, ty) as usize ][opcat(op) as usize];
}
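
The categorize-then-index scheme used by `is_binopable` in miniature; the categories and table contents below are invented for the sketch:

    const OP_ADD: usize = 0;
    const OP_SHIFT: usize = 1;

    fn type_cat(name: &str) -> usize {
        match name {
            "isize" | "usize" | "i32" | "u32" => 1, // int-like
            "f32" | "f64" => 2,                     // float-like
            _ => 0,                                 // other
        }
    }

    fn is_allowed(ty: &str, op: usize) -> bool {
        static TBL: [[bool; 2]; 3] = [
            /* other */ [false, false],
            /* int   */ [true,  true],
            /* float */ [true,  false],
        ];
        TBL[type_cat(ty)][op]
    }

    fn main() {
        assert!(is_allowed("i32", OP_SHIFT));
        assert!(is_allowed("f64", OP_ADD));
        assert!(!is_allowed("f64", OP_SHIFT));
    }
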
// Returns the repeat count for a repeating vector expression.
pub fn eval_repeat_count(tcx: &ctxt, count_expr: &ast::Expr) -> uint {
match const_eval::eval_const_expr_partial(tcx, count_expr, Some(tcx.types.uint)) {
pub fn eval_repeat_count(tcx: &ctxt, count_expr: &ast::Expr) -> usize {
match const_eval::eval_const_expr_partial(tcx, count_expr, Some(tcx.types.usize)) {
Ok(val) => {
let found = match val {
const_eval::const_uint(count) => return count as uint,
const_eval::const_int(count) if count >= 0 => return count as uint,
const_eval::const_uint(count) => return count as usize,
const_eval::const_int(count) if count >= 0 => return count as usize,
const_eval::const_int(_) => "negative integer",
const_eval::const_float(_) => "float",
const_eval::const_str(_) => "string",
@ -6739,7 +6739,7 @@ pub fn liberate_late_bound_regions<'tcx, T>(
pub fn count_late_bound_regions<'tcx, T>(
tcx: &ty::ctxt<'tcx>,
value: &Binder<T>)
-> uint
-> usize
where T : TypeFoldable<'tcx> + Repr<'tcx>
{
let (_, skol_map) = replace_late_bound_regions(tcx, value, |_| ty::ReStatic);
@ -6785,8 +6785,8 @@ pub fn erase_late_bound_regions<'tcx, T>(
///
/// The chief purpose of this function is to canonicalize regions so that two
/// `FnSig`s or `TraitRef`s which are equivalent up to region naming will become
/// structurally identical. For example, `for<'a, 'b> fn(&'a int, &'b int)` and
/// `for<'a, 'b> fn(&'b int, &'a int)` will become identical after anonymization.
/// structurally identical. For example, `for<'a, 'b> fn(&'a isize, &'b isize)` and
/// `for<'a, 'b> fn(&'b isize, &'a isize)` will become identical after anonymization.
pub fn anonymize_late_bound_regions<'tcx, T>(
tcx: &ctxt<'tcx>,
sig: &Binder<T>)

View File

@ -15,7 +15,7 @@ use std::iter::Iterator;
pub struct TypeWalker<'tcx> {
stack: Vec<Ty<'tcx>>,
last_subtree: uint,
last_subtree: usize,
}
impl<'tcx> TypeWalker<'tcx> {
@ -80,14 +80,14 @@ impl<'tcx> TypeWalker<'tcx> {
/// Skips the subtree of types corresponding to the last type
/// returned by `next()`.
///
/// Example: Imagine you are walking `Foo<Bar<int>, uint>`.
/// Example: Imagine you are walking `Foo<Bar<int>, usize>`.
///
/// ```
/// let mut iter: TypeWalker = ...;
/// iter.next(); // yields Foo
/// iter.next(); // yields Bar<int>
/// iter.skip_current_subtree(); // skips int
/// iter.next(); // yields uint
/// iter.next(); // yields usize
/// ```
pub fn skip_current_subtree(&mut self) {
self.stack.truncate(self.last_subtree);

View File

@ -440,14 +440,14 @@ macro_rules! options {
}
}
fn parse_uint(slot: &mut uint, v: Option<&str>) -> bool {
fn parse_uint(slot: &mut usize, v: Option<&str>) -> bool {
match v.and_then(|s| s.parse().ok()) {
Some(i) => { *slot = i; true },
None => false
}
}
fn parse_opt_uint(slot: &mut Option<uint>, v: Option<&str>) -> bool {
fn parse_opt_uint(slot: &mut Option<usize>, v: Option<&str>) -> bool {
match v {
Some(s) => { *slot = s.parse().ok(); slot.is_some() }
None => { *slot = None; true }
@ -519,16 +519,16 @@ options! {CodegenOptions, CodegenSetter, basic_codegen_options,
"metadata to mangle symbol names with"),
extra_filename: String = ("".to_string(), parse_string,
"extra data to put in each output filename"),
codegen_units: uint = (1, parse_uint,
codegen_units: usize = (1, parse_uint,
"divide crate into N units to optimize in parallel"),
remark: Passes = (SomePasses(Vec::new()), parse_passes,
"print remarks for these optimization passes (space separated, or \"all\")"),
no_stack_check: bool = (false, parse_bool,
"disable checks for stack exhaustion (a memory-safety hazard!)"),
debuginfo: Option<uint> = (None, parse_opt_uint,
debuginfo: Option<usize> = (None, parse_opt_uint,
"debug info emission level, 0 = no debug info, 1 = line tables only, \
2 = full debug info with variable and type information"),
opt_level: Option<uint> = (None, parse_opt_uint,
opt_level: Option<usize> = (None, parse_opt_uint,
"Optimize with possible levels 0-3"),
debug_assertions: Option<bool> = (None, parse_opt_bool,
"explicitly enable the cfg(debug_assertions) directive"),

View File

@ -58,7 +58,7 @@ pub struct Session {
/// The maximum recursion limit for potentially infinitely recursive
/// operations such as auto-dereference and monomorphization.
pub recursion_limit: Cell<uint>,
pub recursion_limit: Cell<usize>,
pub can_print_warnings: bool
}
@ -106,7 +106,7 @@ impl Session {
}
self.diagnostic().handler().err(msg)
}
pub fn err_count(&self) -> uint {
pub fn err_count(&self) -> usize {
self.diagnostic().handler().err_count()
}
pub fn has_errors(&self) -> bool {

View File

@ -35,7 +35,7 @@ pub struct ErrorReported;
pub fn time<T, U, F>(do_it: bool, what: &str, u: U, f: F) -> T where
F: FnOnce(U) -> T,
{
thread_local!(static DEPTH: Cell<uint> = Cell::new(0));
thread_local!(static DEPTH: Cell<usize> = Cell::new(0));
if !do_it { return f(u); }
let old = DEPTH.with(|slot| {
@ -196,10 +196,10 @@ pub fn can_reach<T, S>(edges_map: &HashMap<T, Vec<T>, S>, source: T,
/// # Examples
/// ```
/// struct Context {
/// cache: RefCell<HashMap<uint, uint>>
/// cache: RefCell<HashMap<usize, usize>>
/// }
///
/// fn factorial(ctxt: &Context, n: uint) -> uint {
/// fn factorial(ctxt: &Context, n: usize) -> usize {
/// memoized(&ctxt.cache, n, |n| match n {
/// 0 | 1 => n,
/// _ => factorial(ctxt, n - 2) + factorial(ctxt, n - 1)
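
A self-contained version of the memoization pattern sketched in that doc comment (the recurrence written there is actually the Fibonacci one, so it is named accordingly here):

    use std::cell::RefCell;
    use std::collections::HashMap;

    struct Context {
        cache: RefCell<HashMap<usize, usize>>,
    }

    fn fib(ctxt: &Context, n: usize) -> usize {
        if let Some(&v) = ctxt.cache.borrow().get(&n) {
            return v;
        }
        let v = match n {
            0 | 1 => n,
            _ => fib(ctxt, n - 2) + fib(ctxt, n - 1),
        };
        ctxt.cache.borrow_mut().insert(n, v);
        v
    }

    fn main() {
        let ctxt = Context { cache: RefCell::new(HashMap::new()) };
        assert_eq!(fib(&ctxt, 10), 55);
    }
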

View File

@ -10,7 +10,7 @@
use std::cmp;
pub fn lev_distance(me: &str, t: &str) -> uint {
pub fn lev_distance(me: &str, t: &str) -> usize {
if me.is_empty() { return t.chars().count(); }
if t.is_empty() { return me.chars().count(); }

View File

@ -30,10 +30,10 @@ pub enum UndoLog<D:SnapshotVecDelegate> {
CommittedSnapshot,
/// New variable with given index was created.
NewElem(uint),
NewElem(usize),
/// Variable with given index was changed *from* the given value.
SetElem(uint, D::Value),
SetElem(usize, D::Value),
/// Extensible set of actions
Other(D::Undo)
@ -48,7 +48,7 @@ pub struct SnapshotVec<D:SnapshotVecDelegate> {
// Snapshots are tokens that should be created/consumed linearly.
pub struct Snapshot {
// Length of the undo log at the time the snapshot was taken.
length: uint,
length: usize,
}
pub trait SnapshotVecDelegate {
@ -77,7 +77,7 @@ impl<D:SnapshotVecDelegate> SnapshotVec<D> {
}
}
pub fn push(&mut self, elem: D::Value) -> uint {
pub fn push(&mut self, elem: D::Value) -> usize {
let len = self.values.len();
self.values.push(elem);
@ -88,20 +88,20 @@ impl<D:SnapshotVecDelegate> SnapshotVec<D> {
len
}
pub fn get<'a>(&'a self, index: uint) -> &'a D::Value {
pub fn get<'a>(&'a self, index: usize) -> &'a D::Value {
&self.values[index]
}
/// Returns a mutable pointer into the vec; whatever changes you make here cannot be undone
/// automatically, so you should be sure call `record()` with some sort of suitable undo
/// action.
pub fn get_mut<'a>(&'a mut self, index: uint) -> &'a mut D::Value {
pub fn get_mut<'a>(&'a mut self, index: usize) -> &'a mut D::Value {
&mut self.values[index]
}
/// Updates the element at the given index. The old value will saved (and perhaps restored) if
/// a snapshot is active.
pub fn set(&mut self, index: uint, new_elem: D::Value) {
pub fn set(&mut self, index: usize, new_elem: D::Value) {
let old_elem = mem::replace(&mut self.values[index], new_elem);
if self.in_snapshot() {
self.undo_log.push(SetElem(index, old_elem));
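
The undo-log discipline described above, reduced to a toy that runs on its own (no delegate type, just the two built-in undo actions):

    enum Undo<T> {
        NewElem,            // an element was pushed
        SetElem(usize, T),  // values[index] was overwritten; holds the old value
    }

    struct SnapVec<T> {
        values: Vec<T>,
        undo_log: Vec<Undo<T>>,
    }

    struct Snapshot { length: usize }

    impl<T> SnapVec<T> {
        fn new() -> Self { SnapVec { values: Vec::new(), undo_log: Vec::new() } }

        fn start_snapshot(&mut self) -> Snapshot {
            Snapshot { length: self.undo_log.len() }
        }

        fn push(&mut self, elem: T) {
            self.values.push(elem);
            self.undo_log.push(Undo::NewElem);
        }

        fn set(&mut self, index: usize, new_elem: T) {
            let old = std::mem::replace(&mut self.values[index], new_elem);
            self.undo_log.push(Undo::SetElem(index, old));
        }

        fn rollback_to(&mut self, snapshot: Snapshot) {
            // Replay the undo log in reverse down to the snapshot's length.
            while self.undo_log.len() > snapshot.length {
                match self.undo_log.pop().unwrap() {
                    Undo::NewElem => { self.values.pop(); }
                    Undo::SetElem(index, old) => { self.values[index] = old; }
                }
            }
        }
    }

    fn main() {
        let mut v = SnapVec::new();
        v.push(1);
        let snapshot = v.start_snapshot();
        v.push(2);
        v.set(0, 10);
        v.rollback_to(snapshot);      // undoes the set and the second push
        assert_eq!(v.values, vec![1]);
    }
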

View File

@ -8,17 +8,17 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub const BOX_FIELD_DROP_GLUE: uint = 1;
pub const BOX_FIELD_BODY: uint = 4;
pub const BOX_FIELD_DROP_GLUE: usize = 1;
pub const BOX_FIELD_BODY: usize = 4;
/// The first half of a fat pointer.
/// - For a closure, this is the code address.
/// - For an object or trait instance, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: uint = 0;
pub const FAT_PTR_ADDR: usize = 0;
/// The second half of a fat pointer.
/// - For a closure, this is the address of the environment.
/// - For an object or trait instance, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: uint = 1;
pub const FAT_PTR_EXTRA: usize = 1;
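
The address/extra-word split these constants index into can be observed from safe code:

    use std::mem::size_of;

    fn main() {
        // A thin reference is one word; fat pointers carry one extra word.
        assert_eq!(size_of::<&u8>(), size_of::<usize>());
        assert_eq!(size_of::<&[u8]>(), 2 * size_of::<usize>());                // addr + length
        assert_eq!(size_of::<&dyn std::fmt::Debug>(), 2 * size_of::<usize>()); // addr + vtable

        let data = [10u8, 20, 30];
        let slice: &[u8] = &data;
        let addr = slice.as_ptr(); // the FAT_PTR_ADDR half
        let len = slice.len();     // the FAT_PTR_EXTRA half for slices
        assert_eq!((addr, len), (data.as_ptr(), 3));
    }
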

View File

@ -246,7 +246,7 @@ impl<'a> ArchiveBuilder<'a> {
// Don't allow the total size of `args` to grow beyond 32,000 bytes.
// Windows will raise an error if the argument string is longer than
// 32,768, and we leave a bit of extra space for the program name.
const ARG_LENGTH_LIMIT: uint = 32_000;
const ARG_LENGTH_LIMIT: usize = 32_000;
for member_name in &self.members {
let len = member_name.to_string_lossy().len();

View File

@ -36,7 +36,6 @@
#![feature(collections)]
#![feature(core)]
#![feature(old_fs)]
#![feature(int_uint)]
#![feature(io)]
#![feature(old_io)]
#![feature(old_path)]

View File

@ -90,29 +90,29 @@ trait FixedBuffer {
/// Zero the buffer up until the specified index. The buffer position currently must not be
/// greater than that index.
fn zero_until(&mut self, idx: uint);
fn zero_until(&mut self, idx: usize);
/// Get a slice of the buffer of the specified size. There must be at least that many bytes
/// remaining in the buffer.
fn next<'s>(&'s mut self, len: uint) -> &'s mut [u8];
fn next<'s>(&'s mut self, len: usize) -> &'s mut [u8];
/// Get the current buffer. The buffer must already be full. This clears the buffer as well.
fn full_buffer<'s>(&'s mut self) -> &'s [u8];
/// Get the current position of the buffer.
fn position(&self) -> uint;
fn position(&self) -> usize;
/// Get the number of bytes remaining in the buffer until it is full.
fn remaining(&self) -> uint;
fn remaining(&self) -> usize;
/// Get the size of the buffer
fn size(&self) -> uint;
fn size(&self) -> usize;
}
/// A FixedBuffer of 64 bytes useful for implementing Sha256 which has a 64 byte blocksize.
struct FixedBuffer64 {
buffer: [u8; 64],
buffer_idx: uint,
buffer_idx: usize,
}
impl FixedBuffer64 {
@ -174,13 +174,13 @@ impl FixedBuffer for FixedBuffer64 {
self.buffer_idx = 0;
}
fn zero_until(&mut self, idx: uint) {
fn zero_until(&mut self, idx: usize) {
assert!(idx >= self.buffer_idx);
self.buffer[self.buffer_idx..idx].set_memory(0);
self.buffer_idx = idx;
}
fn next<'s>(&'s mut self, len: uint) -> &'s mut [u8] {
fn next<'s>(&'s mut self, len: usize) -> &'s mut [u8] {
self.buffer_idx += len;
return &mut self.buffer[self.buffer_idx - len..self.buffer_idx];
}
@ -191,11 +191,11 @@ impl FixedBuffer for FixedBuffer64 {
return &self.buffer[..64];
}
fn position(&self) -> uint { self.buffer_idx }
fn position(&self) -> usize { self.buffer_idx }
fn remaining(&self) -> uint { 64 - self.buffer_idx }
fn remaining(&self) -> usize { 64 - self.buffer_idx }
fn size(&self) -> uint { 64 }
fn size(&self) -> usize { 64 }
}
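
The behaviour this buffer type provides — accumulate arbitrary input and hand off exact 64-byte blocks — in a minimal stand-alone form:

    struct BlockBuffer {
        buffer: [u8; 64],
        buffer_idx: usize,
    }

    impl BlockBuffer {
        fn new() -> BlockBuffer { BlockBuffer { buffer: [0; 64], buffer_idx: 0 } }

        // Feed `data` in, calling `process` once per completed 64-byte block.
        fn input<F: FnMut(&[u8; 64])>(&mut self, mut data: &[u8], mut process: F) {
            while !data.is_empty() {
                let take = (64 - self.buffer_idx).min(data.len());
                self.buffer[self.buffer_idx..self.buffer_idx + take]
                    .copy_from_slice(&data[..take]);
                self.buffer_idx += take;
                data = &data[take..];
                if self.buffer_idx == 64 {
                    process(&self.buffer);
                    self.buffer_idx = 0;
                }
            }
        }

        fn remaining(&self) -> usize { 64 - self.buffer_idx }
    }

    fn main() {
        let mut buf = BlockBuffer::new();
        let mut blocks = 0;
        buf.input(&[0u8; 150], |_block| blocks += 1);
        assert_eq!(blocks, 2);           // two full blocks were processed
        assert_eq!(buf.remaining(), 42); // 22 bytes remain buffered
    }
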
/// The StandardPadding trait adds a method useful for Sha256 to a FixedBuffer struct.
@ -204,11 +204,11 @@ trait StandardPadding {
/// guaranteed to have exactly rem remaining bytes when it returns. If there are not at least
/// rem bytes available, the buffer will be zero padded, processed, cleared, and then filled
/// with zeros again until only rem bytes are remaining.
fn standard_padding<F>(&mut self, rem: uint, func: F) where F: FnMut(&[u8]);
fn standard_padding<F>(&mut self, rem: usize, func: F) where F: FnMut(&[u8]);
}
impl <T: FixedBuffer> StandardPadding for T {
fn standard_padding<F>(&mut self, rem: uint, mut func: F) where F: FnMut(&[u8]) {
fn standard_padding<F>(&mut self, rem: usize, mut func: F) where F: FnMut(&[u8]) {
let size = self.size();
self.next(1)[0] = 128;
@ -244,7 +244,7 @@ pub trait Digest {
fn reset(&mut self);
/// Get the output size in bits.
fn output_bits(&self) -> uint;
fn output_bits(&self) -> usize;
/// Convenience function that feeds a string into a digest.
///
@ -514,7 +514,7 @@ impl Digest for Sha256 {
self.engine.reset(&H256);
}
fn output_bits(&self) -> uint { 256 }
fn output_bits(&self) -> usize { 256 }
}
static H256: [u32; 8] = [
@ -613,7 +613,7 @@ mod tests {
/// Feed 1,000,000 'a's into the digest with varying input sizes and check that the result is
/// correct.
fn test_digest_1million_random<D: Digest>(digest: &mut D, blocksize: uint, expected: &str) {
fn test_digest_1million_random<D: Digest>(digest: &mut D, blocksize: usize, expected: &str) {
let total_size = 1000000;
let buffer: Vec<u8> = repeat('a' as u8).take(blocksize * 2).collect();
let mut rng = IsaacRng::new_unseeded();
@ -622,7 +622,7 @@ mod tests {
digest.reset();
while count < total_size {
let next: uint = rng.gen_range(0, 2 * blocksize + 1);
let next: usize = rng.gen_range(0, 2 * blocksize + 1);
let remaining = total_size - count;
let size = if next > remaining { remaining } else { next };
digest.input(&buffer[..size]);

View File

@ -221,7 +221,7 @@ mod svh_visitor {
SawExprLoop(Option<token::InternedString>),
SawExprField(token::InternedString),
SawExprTupField(uint),
SawExprTupField(usize),
SawExprBreak(Option<token::InternedString>),
SawExprAgain(Option<token::InternedString>),

View File

@ -27,7 +27,7 @@ const NUM_RETRIES: u32 = 1 << 31;
// be enough to dissuade an attacker from trying to preemptively create names
// of that length, but not so huge that we unnecessarily drain the random number
// generator of entropy.
const NUM_RAND_CHARS: uint = 12;
const NUM_RAND_CHARS: usize = 12;
impl TempDir {
/// Attempts to make a temporary directory inside of `tmpdir` whose name

View File

@ -335,7 +335,7 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> {
return true;
}
pub fn loans_generated_by(&self, scope: region::CodeExtent) -> Vec<uint> {
pub fn loans_generated_by(&self, scope: region::CodeExtent) -> Vec<usize> {
//! Returns a vector of the loans that are generated as
//! we enter `scope`.
@ -727,7 +727,7 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> {
/// let a: int;
/// a = 10; // ok, even though a is uninitialized
///
/// struct Point { x: uint, y: uint }
/// struct Point { x: usize, y: usize }
/// let p: Point;
/// p.x = 22; // ok, even though `p` is uninitialized
///

View File

@ -88,7 +88,7 @@ pub fn check_crate(tcx: &ty::ctxt) {
make_stat(&bccx, bccx.stats.stable_paths));
}
fn make_stat(bccx: &BorrowckCtxt, stat: uint) -> String {
fn make_stat(bccx: &BorrowckCtxt, stat: usize) -> String {
let total = bccx.stats.guaranteed_paths as f64;
let perc = if total == 0.0 { 0.0 } else { stat as f64 * 100.0 / total };
format!("{} ({:.0}%)", stat, perc)
@ -238,10 +238,10 @@ pub struct BorrowckCtxt<'a, 'tcx: 'a> {
}
struct BorrowStats {
loaned_paths_same: uint,
loaned_paths_imm: uint,
stable_paths: uint,
guaranteed_paths: uint
loaned_paths_same: usize,
loaned_paths_imm: usize,
stable_paths: usize,
guaranteed_paths: usize
}
pub type BckResult<'tcx, T> = Result<T, BckError<'tcx>>;
@ -251,7 +251,7 @@ pub type BckResult<'tcx, T> = Result<T, BckError<'tcx>>;
/// Record of a loan that was issued.
pub struct Loan<'tcx> {
index: uint,
index: usize,
loan_path: Rc<LoanPath<'tcx>>,
kind: ty::BorrowKind,
restricted_paths: Vec<Rc<LoanPath<'tcx>>>,
@ -382,7 +382,7 @@ impl<'tcx> LoanPath<'tcx> {
}
}
fn depth(&self) -> uint {
fn depth(&self) -> usize {
match self.kind {
LpExtend(ref base, _, LpDeref(_)) => base.depth(),
LpExtend(ref base, _, LpInterior(_)) => base.depth() + 1,
@ -1043,7 +1043,7 @@ fn is_statement_scope(tcx: &ty::ctxt, region: ty::Region) -> bool {
impl BitwiseOperator for LoanDataFlowOperator {
#[inline]
fn join(&self, succ: uint, pred: uint) -> uint {
fn join(&self, succ: usize, pred: usize) -> usize {
succ | pred // loans from both preds are in scope
}
}
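
The `join` above is a bitwise OR over bit-sets of loan indices; its effect on two predecessor states:

    fn join(succ: usize, pred: usize) -> usize { succ | pred }

    fn main() {
        let pred_a = 0b0011; // loans #0 and #1 in scope on one predecessor
        let pred_b = 0b0110; // loans #1 and #2 in scope on the other
        let entry = join(join(0, pred_a), pred_b);
        assert_eq!(entry, 0b0111); // anything live on either path stays live
    }
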

View File

@ -76,10 +76,10 @@ pub struct FlowedMoveData<'a, 'tcx: 'a> {
/// Index into `MoveData.paths`, used like a pointer
#[derive(Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub struct MovePathIndex(uint);
pub struct MovePathIndex(usize);
impl MovePathIndex {
fn get(&self) -> uint {
fn get(&self) -> usize {
let MovePathIndex(v) = *self; v
}
}
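
The "index used like a pointer" idiom in a directly compilable form (the field names are illustrative):

    #[derive(Copy, Clone, PartialEq, Eq, Debug)]
    struct MovePathIndex(usize);

    struct MovePath {
        name: &'static str,
        parent: Option<MovePathIndex>,
    }

    struct MoveData {
        paths: Vec<MovePath>,
    }

    impl MoveData {
        fn path(&self, index: MovePathIndex) -> &MovePath {
            &self.paths[index.0]
        }
    }

    fn main() {
        let data = MoveData {
            paths: vec![
                MovePath { name: "a", parent: None },
                MovePath { name: "a.b", parent: Some(MovePathIndex(0)) },
            ],
        };
        let child = MovePathIndex(1);
        assert_eq!(data.path(child).name, "a.b");
        let parent = data.path(child).parent.unwrap();
        assert_eq!(data.path(parent).name, "a");
    }
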
@ -95,10 +95,10 @@ const InvalidMovePathIndex: MovePathIndex = MovePathIndex(usize::MAX);
/// Index into `MoveData.moves`, used like a pointer
#[derive(Copy, PartialEq)]
pub struct MoveIndex(uint);
pub struct MoveIndex(usize);
impl MoveIndex {
fn get(&self) -> uint {
fn get(&self) -> usize {
let MoveIndex(v) = *self; v
}
}
@ -740,7 +740,7 @@ impl<'a, 'tcx> FlowedMoveData<'a, 'tcx> {
impl BitwiseOperator for MoveDataFlowOperator {
#[inline]
fn join(&self, succ: uint, pred: uint) -> uint {
fn join(&self, succ: usize, pred: usize) -> usize {
succ | pred // moves from both preds are in scope
}
}
@ -754,7 +754,7 @@ impl DataFlowOperator for MoveDataFlowOperator {
impl BitwiseOperator for AssignDataFlowOperator {
#[inline]
fn join(&self, succ: uint, pred: uint) -> uint {
fn join(&self, succ: usize, pred: usize) -> usize {
succ | pred // moves from both preds are in scope
}
}

View File

@ -79,7 +79,7 @@ impl<'a, 'tcx> DataflowLabeller<'a, 'tcx> {
cfgidx: CFGIndex,
dfcx: &DataFlowContext<'a, 'tcx, O>,
mut to_lp: F) -> String where
F: FnMut(uint) -> Rc<LoanPath<'tcx>>,
F: FnMut(usize) -> Rc<LoanPath<'tcx>>,
{
let mut saw_some = false;
let mut set = "{".to_string();

View File

@ -22,7 +22,6 @@
#![allow(non_camel_case_types)]
#![feature(core)]
#![feature(int_uint)]
#![feature(quote)]
#![feature(rustc_diagnostic_macros)]
#![feature(rustc_private)]

View File

@ -28,7 +28,6 @@
#![feature(box_syntax)]
#![feature(collections)]
#![feature(core)]
#![feature(int_uint)]
#![feature(libc)]
#![feature(quote)]
#![feature(rustc_diagnostic_macros)]
@ -101,7 +100,7 @@ const BUG_REPORT_URL: &'static str =
"https://github.com/rust-lang/rust/blob/master/CONTRIBUTING.md#bug-reports";
pub fn run(args: Vec<String>) -> int {
pub fn run(args: Vec<String>) -> isize {
monitor(move || run_compiler(&args, &mut RustcDefaultCalls));
0
}
@ -795,7 +794,7 @@ fn parse_crate_attrs(sess: &Session, input: &Input) ->
/// errors of the compiler.
#[allow(deprecated)]
pub fn monitor<F:FnOnce()+Send+'static>(f: F) {
const STACK_SIZE: uint = 8 * 1024 * 1024; // 8MB
const STACK_SIZE: usize = 8 * 1024 * 1024; // 8MB
struct Sink(Arc<Mutex<Vec<u8>>>);
impl Write for Sink {

View File

@ -88,13 +88,13 @@ impl Emitter for ExpectErrorEmitter {
}
}
fn errors(msgs: &[&str]) -> (Box<Emitter+Send>, uint) {
fn errors(msgs: &[&str]) -> (Box<Emitter+Send>, usize) {
let v = msgs.iter().map(|m| m.to_string()).collect();
(box ExpectErrorEmitter { messages: v } as Box<Emitter+Send>, msgs.len())
}
fn test_env<F>(source_string: &str,
(emitter, expected_err_count): (Box<Emitter+Send>, uint),
(emitter, expected_err_count): (Box<Emitter+Send>, usize),
body: F) where
F: FnOnce(Env),
{
@ -178,7 +178,7 @@ impl<'a, 'tcx> Env<'a, 'tcx> {
fn search_mod(this: &Env,
m: &ast::Mod,
idx: uint,
idx: usize,
names: &[String])
-> Option<ast::NodeId> {
assert!(idx < names.len());
@ -192,7 +192,7 @@ impl<'a, 'tcx> Env<'a, 'tcx> {
fn search(this: &Env,
it: &ast::Item,
idx: uint,
idx: usize,
names: &[String])
-> Option<ast::NodeId> {
if idx == names.len() {
@ -300,14 +300,14 @@ impl<'a, 'tcx> Env<'a, 'tcx> {
pub fn t_rptr(&self, r: ty::Region) -> Ty<'tcx> {
ty::mk_imm_rptr(self.infcx.tcx,
self.infcx.tcx.mk_region(r),
self.tcx().types.int)
self.tcx().types.isize)
}
pub fn t_rptr_late_bound(&self, id: u32) -> Ty<'tcx> {
let r = self.re_late_bound_with_debruijn(id, ty::DebruijnIndex::new(1));
ty::mk_imm_rptr(self.infcx.tcx,
self.infcx.tcx.mk_region(r),
self.tcx().types.int)
self.tcx().types.isize)
}
pub fn t_rptr_late_bound_with_debruijn(&self,
@ -317,13 +317,13 @@ impl<'a, 'tcx> Env<'a, 'tcx> {
let r = self.re_late_bound_with_debruijn(id, debruijn);
ty::mk_imm_rptr(self.infcx.tcx,
self.infcx.tcx.mk_region(r),
self.tcx().types.int)
self.tcx().types.isize)
}
pub fn t_rptr_scope(&self, id: ast::NodeId) -> Ty<'tcx> {
let r = ty::ReScope(CodeExtent::from_node_id(id));
ty::mk_imm_rptr(self.infcx.tcx, self.infcx.tcx.mk_region(r),
self.tcx().types.int)
self.tcx().types.isize)
}
pub fn re_free(&self, nid: ast::NodeId, id: u32) -> ty::Region {
@ -335,13 +335,13 @@ impl<'a, 'tcx> Env<'a, 'tcx> {
let r = self.re_free(nid, id);
ty::mk_imm_rptr(self.infcx.tcx,
self.infcx.tcx.mk_region(r),
self.tcx().types.int)
self.tcx().types.isize)
}
pub fn t_rptr_static(&self) -> Ty<'tcx> {
ty::mk_imm_rptr(self.infcx.tcx,
self.infcx.tcx.mk_region(ty::ReStatic),
self.tcx().types.int)
self.tcx().types.isize)
}
pub fn dummy_type_trace(&self) -> infer::TypeTrace<'tcx> {
@ -464,15 +464,15 @@ fn contravariant_region_ptr_err() {
fn sub_free_bound_false() {
//! Test that:
//!
//! fn(&'a int) <: for<'b> fn(&'b int)
//! fn(&'a isize) <: for<'b> fn(&'b isize)
//!
//! does NOT hold.
test_env(EMPTY_SOURCE_STR, errors(&[]), |env| {
let t_rptr_free1 = env.t_rptr_free(0, 1);
let t_rptr_bound1 = env.t_rptr_late_bound(1);
env.check_not_sub(env.t_fn(&[t_rptr_free1], env.tcx().types.int),
env.t_fn(&[t_rptr_bound1], env.tcx().types.int));
env.check_not_sub(env.t_fn(&[t_rptr_free1], env.tcx().types.isize),
env.t_fn(&[t_rptr_bound1], env.tcx().types.isize));
})
}
@ -480,15 +480,15 @@ fn sub_free_bound_false() {
fn sub_bound_free_true() {
//! Test that:
//!
//! for<'a> fn(&'a int) <: fn(&'b int)
//! for<'a> fn(&'a isize) <: fn(&'b isize)
//!
//! DOES hold.
test_env(EMPTY_SOURCE_STR, errors(&[]), |env| {
let t_rptr_bound1 = env.t_rptr_late_bound(1);
let t_rptr_free1 = env.t_rptr_free(0, 1);
env.check_sub(env.t_fn(&[t_rptr_bound1], env.tcx().types.int),
env.t_fn(&[t_rptr_free1], env.tcx().types.int));
env.check_sub(env.t_fn(&[t_rptr_bound1], env.tcx().types.isize),
env.t_fn(&[t_rptr_free1], env.tcx().types.isize));
})
}
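
The two subtyping facts these tests pin down can also be observed at the surface level; a small sketch:

    fn takes_specific<'a>(f: fn(&'a isize) -> isize, x: &'a isize) -> isize { f(x) }

    fn takes_hrtb(f: for<'b> fn(&'b isize) -> isize) -> isize {
        let v = 7;
        f(&v)
    }

    fn deref(x: &isize) -> isize { *x }

    fn main() {
        // for<'b> fn(&'b isize) can be used where fn(&'a isize) is expected
        // (the `sub_bound_free_true` direction) ...
        let hr: for<'b> fn(&'b isize) -> isize = deref;
        let x = 3;
        assert_eq!(takes_specific(hr, &x), 3);
        assert_eq!(takes_hrtb(hr), 7);

        // ... but a fn pointer that only works for one fixed lifetime cannot
        // be passed where `for<'b> fn(&'b isize)` is required — that is the
        // `sub_free_bound_false` direction, and it is rejected at compile time.
    }
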
@ -496,15 +496,15 @@ fn sub_bound_free_true() {
fn sub_free_bound_false_infer() {
//! Test that:
//!
//! fn(_#1) <: for<'b> fn(&'b int)
//! fn(_#1) <: for<'b> fn(&'b isize)
//!
//! does NOT hold for any instantiation of `_#1`.
test_env(EMPTY_SOURCE_STR, errors(&[]), |env| {
let t_infer1 = env.infcx.next_ty_var();
let t_rptr_bound1 = env.t_rptr_late_bound(1);
env.check_not_sub(env.t_fn(&[t_infer1], env.tcx().types.int),
env.t_fn(&[t_rptr_bound1], env.tcx().types.int));
env.check_not_sub(env.t_fn(&[t_infer1], env.tcx().types.isize),
env.t_fn(&[t_rptr_bound1], env.tcx().types.isize));
})
}
@ -512,19 +512,19 @@ fn sub_free_bound_false_infer() {
fn lub_free_bound_infer() {
//! Test result of:
//!
//! LUB(fn(_#1), for<'b> fn(&'b int))
//! LUB(fn(_#1), for<'b> fn(&'b isize))
//!
//! This should yield `fn(&'_ int)`. We check
//! that it yields `fn(&'x int)` for some free `'x`,
//! This should yield `fn(&'_ isize)`. We check
//! that it yields `fn(&'x isize)` for some free `'x`,
//! anyhow.
test_env(EMPTY_SOURCE_STR, errors(&[]), |env| {
let t_infer1 = env.infcx.next_ty_var();
let t_rptr_bound1 = env.t_rptr_late_bound(1);
let t_rptr_free1 = env.t_rptr_free(0, 1);
env.check_lub(env.t_fn(&[t_infer1], env.tcx().types.int),
env.t_fn(&[t_rptr_bound1], env.tcx().types.int),
env.t_fn(&[t_rptr_free1], env.tcx().types.int));
env.check_lub(env.t_fn(&[t_infer1], env.tcx().types.isize),
env.t_fn(&[t_rptr_bound1], env.tcx().types.isize),
env.t_fn(&[t_rptr_free1], env.tcx().types.isize));
});
}
@ -533,9 +533,9 @@ fn lub_bound_bound() {
test_env(EMPTY_SOURCE_STR, errors(&[]), |env| {
let t_rptr_bound1 = env.t_rptr_late_bound(1);
let t_rptr_bound2 = env.t_rptr_late_bound(2);
env.check_lub(env.t_fn(&[t_rptr_bound1], env.tcx().types.int),
env.t_fn(&[t_rptr_bound2], env.tcx().types.int),
env.t_fn(&[t_rptr_bound1], env.tcx().types.int));
env.check_lub(env.t_fn(&[t_rptr_bound1], env.tcx().types.isize),
env.t_fn(&[t_rptr_bound2], env.tcx().types.isize),
env.t_fn(&[t_rptr_bound1], env.tcx().types.isize));
})
}
@ -544,9 +544,9 @@ fn lub_bound_free() {
test_env(EMPTY_SOURCE_STR, errors(&[]), |env| {
let t_rptr_bound1 = env.t_rptr_late_bound(1);
let t_rptr_free1 = env.t_rptr_free(0, 1);
env.check_lub(env.t_fn(&[t_rptr_bound1], env.tcx().types.int),
env.t_fn(&[t_rptr_free1], env.tcx().types.int),
env.t_fn(&[t_rptr_free1], env.tcx().types.int));
env.check_lub(env.t_fn(&[t_rptr_bound1], env.tcx().types.isize),
env.t_fn(&[t_rptr_free1], env.tcx().types.isize),
env.t_fn(&[t_rptr_free1], env.tcx().types.isize));
})
}
@ -555,9 +555,9 @@ fn lub_bound_static() {
test_env(EMPTY_SOURCE_STR, errors(&[]), |env| {
let t_rptr_bound1 = env.t_rptr_late_bound(1);
let t_rptr_static = env.t_rptr_static();
env.check_lub(env.t_fn(&[t_rptr_bound1], env.tcx().types.int),
env.t_fn(&[t_rptr_static], env.tcx().types.int),
env.t_fn(&[t_rptr_static], env.tcx().types.int));
env.check_lub(env.t_fn(&[t_rptr_bound1], env.tcx().types.isize),
env.t_fn(&[t_rptr_static], env.tcx().types.isize),
env.t_fn(&[t_rptr_static], env.tcx().types.isize));
})
}
@ -578,9 +578,9 @@ fn lub_free_free() {
let t_rptr_free1 = env.t_rptr_free(0, 1);
let t_rptr_free2 = env.t_rptr_free(0, 2);
let t_rptr_static = env.t_rptr_static();
env.check_lub(env.t_fn(&[t_rptr_free1], env.tcx().types.int),
env.t_fn(&[t_rptr_free2], env.tcx().types.int),
env.t_fn(&[t_rptr_static], env.tcx().types.int));
env.check_lub(env.t_fn(&[t_rptr_free1], env.tcx().types.isize),
env.t_fn(&[t_rptr_free2], env.tcx().types.isize),
env.t_fn(&[t_rptr_static], env.tcx().types.isize));
})
}
@ -603,9 +603,9 @@ fn glb_free_free_with_common_scope() {
let t_rptr_free1 = env.t_rptr_free(0, 1);
let t_rptr_free2 = env.t_rptr_free(0, 2);
let t_rptr_scope = env.t_rptr_scope(0);
env.check_glb(env.t_fn(&[t_rptr_free1], env.tcx().types.int),
env.t_fn(&[t_rptr_free2], env.tcx().types.int),
env.t_fn(&[t_rptr_scope], env.tcx().types.int));
env.check_glb(env.t_fn(&[t_rptr_free1], env.tcx().types.isize),
env.t_fn(&[t_rptr_free2], env.tcx().types.isize),
env.t_fn(&[t_rptr_scope], env.tcx().types.isize));
})
}
@ -614,9 +614,9 @@ fn glb_bound_bound() {
test_env(EMPTY_SOURCE_STR, errors(&[]), |env| {
let t_rptr_bound1 = env.t_rptr_late_bound(1);
let t_rptr_bound2 = env.t_rptr_late_bound(2);
env.check_glb(env.t_fn(&[t_rptr_bound1], env.tcx().types.int),
env.t_fn(&[t_rptr_bound2], env.tcx().types.int),
env.t_fn(&[t_rptr_bound1], env.tcx().types.int));
env.check_glb(env.t_fn(&[t_rptr_bound1], env.tcx().types.isize),
env.t_fn(&[t_rptr_bound2], env.tcx().types.isize),
env.t_fn(&[t_rptr_bound1], env.tcx().types.isize));
})
}
@ -625,9 +625,9 @@ fn glb_bound_free() {
test_env(EMPTY_SOURCE_STR, errors(&[]), |env| {
let t_rptr_bound1 = env.t_rptr_late_bound(1);
let t_rptr_free1 = env.t_rptr_free(0, 1);
env.check_glb(env.t_fn(&[t_rptr_bound1], env.tcx().types.int),
env.t_fn(&[t_rptr_free1], env.tcx().types.int),
env.t_fn(&[t_rptr_bound1], env.tcx().types.int));
env.check_glb(env.t_fn(&[t_rptr_bound1], env.tcx().types.isize),
env.t_fn(&[t_rptr_free1], env.tcx().types.isize),
env.t_fn(&[t_rptr_bound1], env.tcx().types.isize));
})
}
@ -637,14 +637,14 @@ fn glb_bound_free_infer() {
let t_rptr_bound1 = env.t_rptr_late_bound(1);
let t_infer1 = env.infcx.next_ty_var();
// compute GLB(fn(_) -> int, for<'b> fn(&'b int) -> int),
// which should yield for<'b> fn(&'b int) -> int
env.check_glb(env.t_fn(&[t_rptr_bound1], env.tcx().types.int),
env.t_fn(&[t_infer1], env.tcx().types.int),
env.t_fn(&[t_rptr_bound1], env.tcx().types.int));
// compute GLB(fn(_) -> isize, for<'b> fn(&'b isize) -> isize),
// which should yield for<'b> fn(&'b isize) -> isize
env.check_glb(env.t_fn(&[t_rptr_bound1], env.tcx().types.isize),
env.t_fn(&[t_infer1], env.tcx().types.isize),
env.t_fn(&[t_rptr_bound1], env.tcx().types.isize));
// as a side-effect, computing GLB should unify `_` with
// `&'_ int`
// `&'_ isize`
let t_resolve1 = env.infcx.shallow_resolve(t_infer1);
match t_resolve1.sty {
ty::ty_rptr(..) => { }
@ -658,9 +658,9 @@ fn glb_bound_static() {
test_env(EMPTY_SOURCE_STR, errors(&[]), |env| {
let t_rptr_bound1 = env.t_rptr_late_bound(1);
let t_rptr_static = env.t_rptr_static();
env.check_glb(env.t_fn(&[t_rptr_bound1], env.tcx().types.int),
env.t_fn(&[t_rptr_static], env.tcx().types.int),
env.t_fn(&[t_rptr_bound1], env.tcx().types.int));
env.check_glb(env.t_fn(&[t_rptr_bound1], env.tcx().types.isize),
env.t_fn(&[t_rptr_static], env.tcx().types.isize),
env.t_fn(&[t_rptr_bound1], env.tcx().types.isize));
})
}
@ -684,7 +684,7 @@ fn subst_ty_renumber_bound() {
let substs = subst::Substs::new_type(vec![t_rptr_bound1], vec![]);
let t_substituted = t_source.subst(env.infcx.tcx, &substs);
// t_expected = fn(&'a int)
// t_expected = fn(&'a isize)
let t_expected = {
let t_ptr_bound2 = env.t_rptr_late_bound_with_debruijn(1, ty::DebruijnIndex::new(2));
env.t_fn(&[t_ptr_bound2], env.t_nil())
@ -719,7 +719,7 @@ fn subst_ty_renumber_some_bounds() {
let substs = subst::Substs::new_type(vec![t_rptr_bound1], vec![]);
let t_substituted = t_source.subst(env.infcx.tcx, &substs);
// t_expected = (&'a int, fn(&'a int))
// t_expected = (&'a isize, fn(&'a isize))
//
// but not that the Debruijn index is different in the different cases.
let t_expected = {
@ -771,7 +771,7 @@ fn subst_region_renumber_region() {
test_env(EMPTY_SOURCE_STR, errors(&[]), |env| {
let re_bound1 = env.re_late_bound_with_debruijn(1, ty::DebruijnIndex::new(1));
// type t_source<'a> = fn(&'a int)
// type t_source<'a> = fn(&'a isize)
let t_source = {
let re_early = env.re_early_bound(subst::TypeSpace, 0, "'a");
env.t_fn(&[env.t_rptr(re_early)], env.t_nil())
@ -780,7 +780,7 @@ fn subst_region_renumber_region() {
let substs = subst::Substs::new_type(vec![], vec![re_bound1]);
let t_substituted = t_source.subst(env.infcx.tcx, &substs);
// t_expected = fn(&'a int)
// t_expected = fn(&'a isize)
//
// but not that the Debruijn index is different in the different cases.
let t_expected = {
@ -802,8 +802,8 @@ fn subst_region_renumber_region() {
fn walk_ty() {
test_env(EMPTY_SOURCE_STR, errors(&[]), |env| {
let tcx = env.infcx.tcx;
let int_ty = tcx.types.int;
let uint_ty = tcx.types.uint;
let int_ty = tcx.types.isize;
let uint_ty = tcx.types.usize;
let tup1_ty = ty::mk_tup(tcx, vec!(int_ty, uint_ty, int_ty, uint_ty));
let tup2_ty = ty::mk_tup(tcx, vec!(tup1_ty, tup1_ty, uint_ty));
let uniq_ty = ty::mk_uniq(tcx, tup2_ty);
@ -821,8 +821,8 @@ fn walk_ty() {
fn walk_ty_skip_subtree() {
test_env(EMPTY_SOURCE_STR, errors(&[]), |env| {
let tcx = env.infcx.tcx;
let int_ty = tcx.types.int;
let uint_ty = tcx.types.uint;
let int_ty = tcx.types.isize;
let uint_ty = tcx.types.usize;
let tup1_ty = ty::mk_tup(tcx, vec!(int_ty, uint_ty, int_ty, uint_ty));
let tup2_ty = ty::mk_tup(tcx, vec!(tup1_ty, tup1_ty, uint_ty));
let uniq_ty = ty::mk_uniq(tcx, tup2_ty);
@ -836,7 +836,7 @@ fn walk_ty_skip_subtree() {
(uint_ty, false),
(int_ty, false),
(uint_ty, false),
(tup1_ty, true), // skip the int/uint/int/uint
(tup1_ty, true), // skip the isize/usize/isize/usize
(uint_ty, false));
expected.reverse();

View File

@ -180,7 +180,7 @@ impl LintPass for TypeLimits {
if let ast::LitInt(shift, _) = lit.node { shift >= bits }
else { false }
} else {
match eval_const_expr_partial(cx.tcx, &**r, Some(cx.tcx.types.uint)) {
match eval_const_expr_partial(cx.tcx, &**r, Some(cx.tcx.types.usize)) {
Ok(const_int(shift)) => { shift as u64 >= bits },
Ok(const_uint(shift)) => { shift >= bits },
_ => { false }

View File

@ -34,7 +34,6 @@
#![feature(box_syntax)]
#![feature(collections)]
#![feature(core)]
#![feature(int_uint)]
#![feature(quote)]
#![feature(rustc_diagnostic_macros)]
#![feature(rustc_private)]

View File

@ -61,7 +61,7 @@ impl ArchiveRO {
if ptr.is_null() {
None
} else {
Some(slice::from_raw_parts(ptr as *const u8, size as uint))
Some(slice::from_raw_parts(ptr as *const u8, size as usize))
}
}
}

View File

@ -28,7 +28,6 @@
#![feature(box_syntax)]
#![feature(collections)]
#![feature(int_uint)]
#![feature(libc)]
#![feature(link_args)]
#![feature(staged_api)]
@ -77,7 +76,7 @@ pub type Bool = c_uint;
pub const True: Bool = 1 as Bool;
pub const False: Bool = 0 as Bool;
// Consts for the LLVM CallConv type, pre-cast to uint.
// Consts for the LLVM CallConv type, pre-cast to usize.
#[derive(Copy, PartialEq)]
pub enum CallConv {
@ -242,7 +241,7 @@ impl AttrHelper for SpecialAttribute {
}
pub struct AttrBuilder {
attrs: Vec<(uint, Box<AttrHelper+'static>)>
attrs: Vec<(usize, Box<AttrHelper+'static>)>
}
impl AttrBuilder {
@ -252,13 +251,13 @@ impl AttrBuilder {
}
}
pub fn arg<'a, T: AttrHelper + 'static>(&'a mut self, idx: uint, a: T) -> &'a mut AttrBuilder {
pub fn arg<'a, T: AttrHelper + 'static>(&'a mut self, idx: usize, a: T) -> &'a mut AttrBuilder {
self.attrs.push((idx, box a as Box<AttrHelper+'static>));
self
}
pub fn ret<'a, T: AttrHelper + 'static>(&'a mut self, a: T) -> &'a mut AttrBuilder {
self.attrs.push((ReturnIndex as uint, box a as Box<AttrHelper+'static>));
self.attrs.push((ReturnIndex as usize, box a as Box<AttrHelper+'static>));
self
}
@ -693,7 +692,7 @@ extern {
-> ValueRef;
pub fn LLVMConstFCmp(Pred: c_ushort, V1: ValueRef, V2: ValueRef)
-> ValueRef;
/* only for int/vector */
/* only for isize/vector */
pub fn LLVMGetUndef(Ty: TypeRef) -> ValueRef;
pub fn LLVMIsConstant(Val: ValueRef) -> Bool;
pub fn LLVMIsNull(Val: ValueRef) -> Bool;
@ -2167,7 +2166,7 @@ impl ObjectFile {
pub fn new(llmb: MemoryBufferRef) -> Option<ObjectFile> {
unsafe {
let llof = LLVMCreateObjectFile(llmb);
if llof as int == 0 {
if llof as isize == 0 {
// LLVMCreateObjectFile took ownership of llmb
return None
}
@ -2227,7 +2226,7 @@ type RustStringRepr = *mut RefCell<Vec<u8>>;
pub unsafe extern "C" fn rust_llvm_string_write_impl(sr: RustStringRef,
ptr: *const c_char,
size: size_t) {
let slice = slice::from_raw_parts(ptr as *const u8, size as uint);
let slice = slice::from_raw_parts(ptr as *const u8, size as usize);
let sr: RustStringRepr = mem::transmute(sr);
(*sr).borrow_mut().push_all(slice);

View File

@ -19,7 +19,6 @@
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/nightly/")]
#![feature(int_uint)]
#![feature(rustc_diagnostic_macros)]
#![feature(rustc_private)]
#![feature(staged_api)]
@ -378,7 +377,7 @@ enum PrivacyResult {
}
enum FieldName {
UnnamedField(uint), // index
UnnamedField(usize), // index
// (Name, not Ident, because struct fields are not macro-hygienic)
NamedField(ast::Name),
}

View File

@ -22,7 +22,6 @@
#![feature(alloc)]
#![feature(collections)]
#![feature(core)]
#![feature(int_uint)]
#![feature(rustc_diagnostic_macros)]
#![feature(rustc_private)]
#![feature(staged_api)]
@ -327,7 +326,7 @@ enum UseLexicalScopeFlag {
enum ModulePrefixResult {
NoPrefixFound,
PrefixFound(Rc<Module>, uint)
PrefixFound(Rc<Module>, usize)
}
#[derive(Copy, PartialEq)]
@ -415,10 +414,10 @@ pub struct Module {
import_resolutions: RefCell<HashMap<Name, ImportResolution>>,
// The number of unresolved globs that this module exports.
glob_count: Cell<uint>,
glob_count: Cell<usize>,
// The index of the import we're resolving.
resolved_import_count: Cell<uint>,
resolved_import_count: Cell<usize>,
// Whether this module is populated. If not populated, any attempt to
// access the children must be preceded with a
@ -777,7 +776,7 @@ pub struct Resolver<'a, 'tcx:'a> {
structs: FnvHashMap<DefId, Vec<Name>>,
// The number of imports that are currently unresolved.
unresolved_imports: uint,
unresolved_imports: usize,
// The module that represents the current item scope.
current_module: Rc<Module>,
@ -959,7 +958,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
fn resolve_module_path_from_root(&mut self,
module_: Rc<Module>,
module_path: &[Name],
index: uint,
index: usize,
span: Span,
name_search_type: NameSearchType,
lp: LastPrivate)
@ -3053,12 +3052,12 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
NoSuggestion
}
fn find_best_match_for_name(&mut self, name: &str, max_distance: uint)
fn find_best_match_for_name(&mut self, name: &str, max_distance: usize)
-> Option<String> {
let this = &mut *self;
let mut maybes: Vec<token::InternedString> = Vec::new();
let mut values: Vec<uint> = Vec::new();
let mut values: Vec<usize> = Vec::new();
for rib in this.value_ribs.iter().rev() {
for (&k, _) in &rib.bindings {

View File

@ -115,7 +115,7 @@ pub struct ImportResolution {
// Note that this is usually either 0 or 1 - shadowing is forbidden the only
// way outstanding_references is > 1 in a legal program is if the name is
// used in both namespaces.
pub outstanding_references: uint,
pub outstanding_references: usize,
/// The value that this `use` directive names, if there is one.
pub value_target: Option<Target>,

View File

@ -60,16 +60,16 @@ pub const RLIB_BYTECODE_OBJECT_MAGIC: &'static [u8] = b"RUST_OBJECT";
pub const RLIB_BYTECODE_OBJECT_VERSION: u32 = 1;
// The offset in bytes the bytecode object format version number can be found at
pub const RLIB_BYTECODE_OBJECT_VERSION_OFFSET: uint = 11;
pub const RLIB_BYTECODE_OBJECT_VERSION_OFFSET: usize = 11;
// The offset in bytes the size of the compressed bytecode can be found at in
// format version 1
pub const RLIB_BYTECODE_OBJECT_V1_DATASIZE_OFFSET: uint =
pub const RLIB_BYTECODE_OBJECT_V1_DATASIZE_OFFSET: usize =
RLIB_BYTECODE_OBJECT_VERSION_OFFSET + 4;
// The offset in bytes the compressed LLVM bytecode can be found at in format
// version 1
pub const RLIB_BYTECODE_OBJECT_V1_DATA_OFFSET: uint =
pub const RLIB_BYTECODE_OBJECT_V1_DATA_OFFSET: usize =
RLIB_BYTECODE_OBJECT_V1_DATASIZE_OFFSET + 8;
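
A sketch of reading the layout these offsets describe; the little-endian encoding of the version and size fields is an assumption made here for illustration:

    use std::convert::TryInto;

    const MAGIC: &[u8] = b"RUST_OBJECT"; // 11 bytes, hence the offset of 11
    const VERSION_OFFSET: usize = 11;
    const DATASIZE_OFFSET: usize = VERSION_OFFSET + 4;
    const DATA_OFFSET: usize = DATASIZE_OFFSET + 8;

    fn parse(obj: &[u8]) -> Option<(u32, &[u8])> {
        if !obj.starts_with(MAGIC) || obj.len() < DATA_OFFSET {
            return None;
        }
        let version = u32::from_le_bytes(obj[VERSION_OFFSET..DATASIZE_OFFSET].try_into().ok()?);
        let size = u64::from_le_bytes(obj[DATASIZE_OFFSET..DATA_OFFSET].try_into().ok()?) as usize;
        obj.get(DATA_OFFSET..DATA_OFFSET + size).map(|data| (version, data))
    }

    fn main() {
        let mut obj = Vec::new();
        obj.extend_from_slice(MAGIC);
        obj.extend_from_slice(&1u32.to_le_bytes());
        obj.extend_from_slice(&5u64.to_le_bytes());
        obj.extend_from_slice(b"hello");
        assert_eq!(parse(&obj), Some((1, &b"hello"[..])));
    }
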
@ -323,7 +323,7 @@ pub fn mangle_exported_name<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, path: PathEl
"abcdefghijklmnopqrstuvwxyz\
ABCDEFGHIJKLMNOPQRSTUVWXYZ\
0123456789";
let id = id as uint;
let id = id as usize;
let extra1 = id % EXTRA_CHARS.len();
let id = id / EXTRA_CHARS.len();
let extra2 = id % EXTRA_CHARS.len();
@ -695,7 +695,7 @@ fn write_rlib_bytecode_object_v1(writer: &mut Write,
RLIB_BYTECODE_OBJECT_MAGIC.len() + // magic id
mem::size_of_val(&RLIB_BYTECODE_OBJECT_VERSION) + // version
mem::size_of_val(&bc_data_deflated_size) + // data size field
bc_data_deflated_size as uint; // actual data
bc_data_deflated_size as usize; // actual data
// If the number of bytes written to the object so far is odd, add a
// padding byte to make it even. This works around a crash bug in LLDB
@ -1154,7 +1154,7 @@ fn add_upstream_rust_crates(cmd: &mut Command, sess: &Session,
// We may not pass all crates through to the linker. Some crates may
// appear statically in an existing dylib, meaning we'll pick up all the
// symbols from the dylib.
let kind = match data[cnum as uint - 1] {
let kind = match data[cnum as usize - 1] {
Some(t) => t,
None => continue
};

Some files were not shown because too many files have changed in this diff.