rust/src/liballoc/slice.rs

// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A dynamically-sized view into a contiguous sequence, `[T]`.
//!
//! Slices are a view into a block of memory represented as a pointer and a
//! length.
//!
//! ```
//! // slicing a Vec
//! let vec = vec![1, 2, 3];
//! let int_slice = &vec[..];
//! // coercing an array to a slice
//! let str_slice: &[&str] = &["one", "two", "three"];
//! ```
//!
//! Slices are either mutable or shared. The shared slice type is `&[T]`,
//! while the mutable slice type is `&mut [T]`, where `T` represents the element
//! type. For example, you can mutate the block of memory that a mutable slice
//! points to:
//!
//! ```
//! let x = &mut [1, 2, 3];
//! x[1] = 7;
//! assert_eq!(x, &[1, 7, 3]);
//! ```
//!
//! Here are some of the things this module contains:
//!
//! ## Structs
//!
//! There are several structs that are useful for slices, such as [`Iter`], which
//! represents iteration over a slice.
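//! For example, calling [`.iter`] on a slice returns a value of type
//! [`Iter`]:
//!
//! ```
//! let numbers = [1, 2, 3];
//! let mut iter = numbers.iter();
//!
//! assert_eq!(iter.next(), Some(&1));
//! ```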
//!
//! ## Trait Implementations
//!
//! There are several implementations of common traits for slices. Some examples
//! include:
//!
//! * [`Clone`]
//! * [`Eq`], [`Ord`] - for slices whose element type are [`Eq`] or [`Ord`].
//! * [`Hash`] - for slices whose element type is [`Hash`].
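//!
//! For example, because slices of [`Ord`] elements are themselves [`Ord`],
//! two slices can be compared lexicographically:
//!
//! ```
//! let a = [1, 2, 3];
//! let b = [1, 3];
//!
//! assert!(&a[..] < &b[..]);
//! ```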
//!
//! ## Iteration
//!
//! Slices implement `IntoIterator`. The iterator yields references to the
//! slice elements.
//!
//! ```
//! let numbers = &[0, 1, 2];
//! for n in numbers {
//! println!("{} is a number!", n);
//! }
//! ```
//!
//! The mutable slice yields mutable references to the elements:
//!
//! ```
//! let mut scores = [7, 8, 9];
//! for score in &mut scores[..] {
//! *score += 1;
//! }
//! ```
//!
//! This iterator yields mutable references to the slice's elements, so while
//! the element type of the slice is `i32`, the element type of the iterator is
//! `&mut i32`.
//!
//! * [`.iter`] and [`.iter_mut`] are the explicit methods to return the default
//! iterators.
//! * Further methods that return iterators are [`.split`], [`.splitn`],
//! [`.chunks`], [`.windows`] and more.
//!
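//! For example, [`.windows`] and [`.chunks`] both yield subslices:
//!
//! ```
//! let nums = [1, 2, 3, 4];
//!
//! let mut windows = nums.windows(2);
//! assert_eq!(windows.next(), Some(&[1, 2][..]));
//! assert_eq!(windows.next(), Some(&[2, 3][..]));
//!
//! let mut chunks = nums.chunks(3);
//! assert_eq!(chunks.next(), Some(&[1, 2, 3][..]));
//! assert_eq!(chunks.next(), Some(&[4][..]));
//! ```
//!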
//! *[See also the slice primitive type](../../std/primitive.slice.html).*
//!
//! [`Clone`]: ../../std/clone/trait.Clone.html
//! [`Eq`]: ../../std/cmp/trait.Eq.html
//! [`Ord`]: ../../std/cmp/trait.Ord.html
//! [`Iter`]: struct.Iter.html
//! [`Hash`]: ../../std/hash/trait.Hash.html
//! [`.iter`]: ../../std/primitive.slice.html#method.iter
//! [`.iter_mut`]: ../../std/primitive.slice.html#method.iter_mut
//! [`.split`]: ../../std/primitive.slice.html#method.split
//! [`.splitn`]: ../../std/primitive.slice.html#method.splitn
//! [`.chunks`]: ../../std/primitive.slice.html#method.chunks
//! [`.windows`]: ../../std/primitive.slice.html#method.windows
#![stable(feature = "rust1", since = "1.0.0")]
// Many of the usings in this module are only used in the test configuration.
// It's cleaner to just turn off the unused_imports warning than to fix them.
#![cfg_attr(test, allow(unused_imports, dead_code))]
use core::cmp::Ordering::{self, Less};
use core::mem::size_of;
use core::mem;
use core::ptr;
use core::slice as core_slice;
use borrow::{Borrow, BorrowMut, ToOwned};
use boxed::Box;
use vec::Vec;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{Chunks, Windows};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{Iter, IterMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{SplitMut, ChunksMut, Split};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{SplitN, RSplitN, SplitNMut, RSplitNMut};
#[unstable(feature = "slice_rsplit", issue = "41020")]
pub use core::slice::{RSplit, RSplitMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{from_raw_parts, from_raw_parts_mut};
#[unstable(feature = "from_ref", issue = "45703")]
pub use core::slice::{from_ref, from_ref_mut};
#[unstable(feature = "slice_get_slice", issue = "35729")]
pub use core::slice::SliceIndex;
#[unstable(feature = "exact_chunks", issue = "47115")]
pub use core::slice::{ExactChunks, ExactChunksMut};
////////////////////////////////////////////////////////////////////////////////
// Basic slice extension methods
////////////////////////////////////////////////////////////////////////////////
// HACK(japaric) needed for the implementation of `vec!` macro during testing
// NB see the hack module in this file for more details
#[cfg(test)]
pub use self::hack::into_vec;
// HACK(japaric) needed for the implementation of `Vec::clone` during testing
// NB see the hack module in this file for more details
#[cfg(test)]
pub use self::hack::to_vec;
// HACK(japaric): With cfg(test) `impl [T]` is not available, these three
// functions are actually methods that are in `impl [T]` but not in
// `core::slice::SliceExt` - we need to supply these functions for the
// `test_permutations` test
mod hack {
use boxed::Box;
use core::mem;
#[cfg(test)]
use string::ToString;
use vec::Vec;
pub fn into_vec<T>(mut b: Box<[T]>) -> Vec<T> {
unsafe {
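// Reuse the box's allocation for the vector: build a `Vec` over the same
// buffer, then forget the box so the buffer is not freed twice.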
let xs = Vec::from_raw_parts(b.as_mut_ptr(), b.len(), b.len());
mem::forget(b);
xs
}
}
#[inline]
pub fn to_vec<T>(s: &[T]) -> Vec<T>
where T: Clone
{
let mut vector = Vec::with_capacity(s.len());
vector.extend_from_slice(s);
vector
}
}
#[lang = "slice"]
#[cfg(not(test))]
impl<T> [T] {
/// Returns the number of elements in the slice.
///
/// # Examples
///
/// ```
/// let a = [1, 2, 3];
/// assert_eq!(a.len(), 3);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn len(&self) -> usize {
core_slice::SliceExt::len(self)
}
/// Returns `true` if the slice has a length of 0.
///
/// # Examples
///
/// ```
/// let a = [1, 2, 3];
/// assert!(!a.is_empty());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn is_empty(&self) -> bool {
core_slice::SliceExt::is_empty(self)
}
/// Returns the first element of the slice, or `None` if it is empty.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert_eq!(Some(&10), v.first());
///
/// let w: &[i32] = &[];
/// assert_eq!(None, w.first());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn first(&self) -> Option<&T> {
core_slice::SliceExt::first(self)
}
/// Returns a mutable reference to the first element of the slice, or `None` if it is empty.
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some(first) = x.first_mut() {
/// *first = 5;
/// }
/// assert_eq!(x, &[5, 1, 2]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn first_mut(&mut self) -> Option<&mut T> {
core_slice::SliceExt::first_mut(self)
}
/// Returns the first and all the rest of the elements of the slice, or `None` if it is empty.
///
/// # Examples
///
/// ```
/// let x = &[0, 1, 2];
///
/// if let Some((first, elements)) = x.split_first() {
/// assert_eq!(first, &0);
/// assert_eq!(elements, &[1, 2]);
/// }
/// ```
#[stable(feature = "slice_splits", since = "1.5.0")]
#[inline]
pub fn split_first(&self) -> Option<(&T, &[T])> {
core_slice::SliceExt::split_first(self)
}
/// Returns the first and all the rest of the elements of the slice, or `None` if it is empty.
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some((first, elements)) = x.split_first_mut() {
/// *first = 3;
/// elements[0] = 4;
/// elements[1] = 5;
/// }
/// assert_eq!(x, &[3, 4, 5]);
/// ```
#[stable(feature = "slice_splits", since = "1.5.0")]
#[inline]
pub fn split_first_mut(&mut self) -> Option<(&mut T, &mut [T])> {
core_slice::SliceExt::split_first_mut(self)
}
/// Returns the last and all the rest of the elements of the slice, or `None` if it is empty.
///
/// # Examples
///
/// ```
/// let x = &[0, 1, 2];
///
/// if let Some((last, elements)) = x.split_last() {
/// assert_eq!(last, &2);
/// assert_eq!(elements, &[0, 1]);
/// }
/// ```
#[stable(feature = "slice_splits", since = "1.5.0")]
#[inline]
pub fn split_last(&self) -> Option<(&T, &[T])> {
core_slice::SliceExt::split_last(self)
}
/// Returns the last and all the rest of the elements of the slice, or `None` if it is empty.
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some((last, elements)) = x.split_last_mut() {
/// *last = 3;
/// elements[0] = 4;
/// elements[1] = 5;
/// }
/// assert_eq!(x, &[4, 5, 3]);
/// ```
#[stable(feature = "slice_splits", since = "1.5.0")]
#[inline]
pub fn split_last_mut(&mut self) -> Option<(&mut T, &mut [T])> {
core_slice::SliceExt::split_last_mut(self)
}
/// Returns the last element of the slice, or `None` if it is empty.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert_eq!(Some(&30), v.last());
///
/// let w: &[i32] = &[];
/// assert_eq!(None, w.last());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn last(&self) -> Option<&T> {
core_slice::SliceExt::last(self)
}
/// Returns a mutable reference to the last item in the slice, or `None` if it is empty.
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some(last) = x.last_mut() {
/// *last = 10;
/// }
/// assert_eq!(x, &[0, 1, 10]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn last_mut(&mut self) -> Option<&mut T> {
core_slice::SliceExt::last_mut(self)
}
/// Returns a reference to an element or subslice depending on the type of
/// index.
///
/// - If given a position, returns a reference to the element at that
/// position or `None` if out of bounds.
/// - If given a range, returns the subslice corresponding to that range,
/// or `None` if out of bounds.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert_eq!(Some(&40), v.get(1));
/// assert_eq!(Some(&[10, 40][..]), v.get(0..2));
/// assert_eq!(None, v.get(3));
/// assert_eq!(None, v.get(0..4));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn get<I>(&self, index: I) -> Option<&I::Output>
where I: SliceIndex<Self>
{
core_slice::SliceExt::get(self, index)
}
/// Returns a mutable reference to an element or subslice depending on the
/// type of index (see [`get`]) or `None` if the index is out of bounds.
///
/// [`get`]: #method.get
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some(elem) = x.get_mut(1) {
/// *elem = 42;
/// }
/// assert_eq!(x, &[0, 42, 2]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn get_mut<I>(&mut self, index: I) -> Option<&mut I::Output>
where I: SliceIndex<Self>
{
core_slice::SliceExt::get_mut(self, index)
}
/// Returns a reference to an element or subslice, without doing bounds
/// checking.
///
/// This is generally not recommended; use with caution! For a safe
/// alternative see [`get`].
///
/// [`get`]: #method.get
///
/// # Examples
///
/// ```
/// let x = &[1, 2, 4];
///
/// unsafe {
/// assert_eq!(x.get_unchecked(1), &2);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub unsafe fn get_unchecked<I>(&self, index: I) -> &I::Output
where I: SliceIndex<Self>
{
core_slice::SliceExt::get_unchecked(self, index)
}
/// Returns a mutable reference to an element or subslice, without doing
/// bounds checking.
///
/// This is generally not recommended; use with caution! For a safe
/// alternative see [`get_mut`].
///
/// [`get_mut`]: #method.get_mut
///
/// # Examples
///
/// ```
/// let x = &mut [1, 2, 4];
///
/// unsafe {
/// let elem = x.get_unchecked_mut(1);
/// *elem = 13;
/// }
/// assert_eq!(x, &[1, 13, 4]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub unsafe fn get_unchecked_mut<I>(&mut self, index: I) -> &mut I::Output
where I: SliceIndex<Self>
{
core_slice::SliceExt::get_unchecked_mut(self, index)
}
/// Returns a raw pointer to the slice's buffer.
///
/// The caller must ensure that the slice outlives the pointer this
/// function returns, or else it will end up pointing to garbage.
///
/// Modifying the container referenced by this slice may cause its buffer
/// to be reallocated, which would also make any pointers to it invalid.
///
/// # Examples
///
/// ```
/// let x = &[1, 2, 4];
/// let x_ptr = x.as_ptr();
///
/// unsafe {
/// for i in 0..x.len() {
/// assert_eq!(x.get_unchecked(i), &*x_ptr.offset(i as isize));
/// }
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn as_ptr(&self) -> *const T {
core_slice::SliceExt::as_ptr(self)
}
/// Returns an unsafe mutable pointer to the slice's buffer.
///
/// The caller must ensure that the slice outlives the pointer this
/// function returns, or else it will end up pointing to garbage.
///
/// Modifying the container referenced by this slice may cause its buffer
/// to be reallocated, which would also make any pointers to it invalid.
///
/// # Examples
///
/// ```
/// let x = &mut [1, 2, 4];
/// let x_ptr = x.as_mut_ptr();
///
/// unsafe {
/// for i in 0..x.len() {
/// *x_ptr.offset(i as isize) += 2;
/// }
/// }
/// assert_eq!(x, &[3, 4, 6]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn as_mut_ptr(&mut self) -> *mut T {
core_slice::SliceExt::as_mut_ptr(self)
}
/// Swaps two elements in the slice.
///
/// # Arguments
///
/// * a - The index of the first element
/// * b - The index of the second element
///
/// # Panics
///
/// Panics if `a` or `b` are out of bounds.
///
/// # Examples
///
/// ```
/// let mut v = ["a", "b", "c", "d"];
/// v.swap(1, 3);
/// assert!(v == ["a", "d", "c", "b"]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn swap(&mut self, a: usize, b: usize) {
core_slice::SliceExt::swap(self, a, b)
}
/// Reverses the order of elements in the slice, in place.
///
/// # Examples
///
/// ```
/// let mut v = [1, 2, 3];
/// v.reverse();
/// assert!(v == [3, 2, 1]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn reverse(&mut self) {
core_slice::SliceExt::reverse(self)
}
/// Returns an iterator over the slice.
///
/// # Examples
///
/// ```
/// let x = &[1, 2, 4];
/// let mut iterator = x.iter();
///
/// assert_eq!(iterator.next(), Some(&1));
/// assert_eq!(iterator.next(), Some(&2));
/// assert_eq!(iterator.next(), Some(&4));
/// assert_eq!(iterator.next(), None);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn iter(&self) -> Iter<T> {
core_slice::SliceExt::iter(self)
}
/// Returns an iterator that allows modifying each value.
///
/// # Examples
///
/// ```
/// let x = &mut [1, 2, 4];
/// for elem in x.iter_mut() {
/// *elem += 2;
/// }
/// assert_eq!(x, &[3, 4, 6]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn iter_mut(&mut self) -> IterMut<T> {
core_slice::SliceExt::iter_mut(self)
}
/// Returns an iterator over all contiguous windows of length
/// `size`. The windows overlap. If the slice is shorter than
/// `size`, the iterator returns no values.
///
/// # Panics
///
/// Panics if `size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['r', 'u', 's', 't'];
/// let mut iter = slice.windows(2);
/// assert_eq!(iter.next().unwrap(), &['r', 'u']);
/// assert_eq!(iter.next().unwrap(), &['u', 's']);
/// assert_eq!(iter.next().unwrap(), &['s', 't']);
/// assert!(iter.next().is_none());
/// ```
///
/// If the slice is shorter than `size`:
///
/// ```
/// let slice = ['f', 'o', 'o'];
/// let mut iter = slice.windows(4);
/// assert!(iter.next().is_none());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn windows(&self, size: usize) -> Windows<T> {
core_slice::SliceExt::windows(self, size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a
/// time. The chunks are slices and do not overlap. If `chunk_size` does
/// not divide the length of the slice, then the last chunk will
/// not have length `chunk_size`.
///
/// See [`exact_chunks`] for a variant of this iterator that returns chunks
/// of always exactly `chunk_size` elements.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.chunks(2);
/// assert_eq!(iter.next().unwrap(), &['l', 'o']);
/// assert_eq!(iter.next().unwrap(), &['r', 'e']);
/// assert_eq!(iter.next().unwrap(), &['m']);
/// assert!(iter.next().is_none());
/// ```
///
/// [`exact_chunks`]: #method.exact_chunks
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn chunks(&self, chunk_size: usize) -> Chunks<T> {
core_slice::SliceExt::chunks(self, chunk_size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a
/// time. The chunks are slices and do not overlap. If `chunk_size` does
/// not divide the length of the slice, then the last up to `chunk_size-1`
/// elements will be omitted.
///
/// Due to each chunk having exactly `chunk_size` elements, the compiler
/// can often optimize the resulting code better than in the case of
/// [`chunks`].
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// #![feature(exact_chunks)]
///
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.exact_chunks(2);
/// assert_eq!(iter.next().unwrap(), &['l', 'o']);
/// assert_eq!(iter.next().unwrap(), &['r', 'e']);
/// assert!(iter.next().is_none());
/// ```
///
/// [`chunks`]: #method.chunks
#[unstable(feature = "exact_chunks", issue = "47115")]
#[inline]
pub fn exact_chunks(&self, chunk_size: usize) -> ExactChunks<T> {
core_slice::SliceExt::exact_chunks(self, chunk_size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time.
/// The chunks are mutable slices, and do not overlap. If `chunk_size` does
/// not divide the length of the slice, then the last chunk will not
/// have length `chunk_size`.
///
/// See [`exact_chunks_mut`] for a variant of this iterator that returns chunks
/// of always exactly `chunk_size` elements.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// for chunk in v.chunks_mut(2) {
/// for elem in chunk.iter_mut() {
/// *elem += count;
/// }
/// count += 1;
/// }
/// assert_eq!(v, &[1, 1, 2, 2, 3]);
/// ```
///
/// [`exact_chunks_mut`]: #method.exact_chunks_mut
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn chunks_mut(&mut self, chunk_size: usize) -> ChunksMut<T> {
core_slice::SliceExt::chunks_mut(self, chunk_size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time.
/// The chunks are mutable slices, and do not overlap. If `chunk_size` does
/// not divide the length of the slice, then the last up to `chunk_size-1`
/// elements will be omitted.
///
/// Due to each chunk having exactly `chunk_size` elements, the compiler
/// can often optimize the resulting code better than in the case of
/// [`chunks_mut`].
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// #![feature(exact_chunks)]
///
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// for chunk in v.exact_chunks_mut(2) {
/// for elem in chunk.iter_mut() {
/// *elem += count;
/// }
/// count += 1;
/// }
/// assert_eq!(v, &[1, 1, 2, 2, 0]);
/// ```
///
/// [`chunks_mut`]: #method.chunks_mut
#[unstable(feature = "exact_chunks", issue = "47115")]
#[inline]
pub fn exact_chunks_mut(&mut self, chunk_size: usize) -> ExactChunksMut<T> {
core_slice::SliceExt::exact_chunks_mut(self, chunk_size)
}
/// Divides one slice into two at an index.
///
/// The first will contain all indices from `[0, mid)` (excluding
/// the index `mid` itself) and the second will contain all
/// indices from `[mid, len)` (excluding the index `len` itself).
///
/// # Panics
///
/// Panics if `mid > len`.
///
/// # Examples
///
/// ```
/// let v = [1, 2, 3, 4, 5, 6];
///
/// {
/// let (left, right) = v.split_at(0);
/// assert!(left == []);
/// assert!(right == [1, 2, 3, 4, 5, 6]);
/// }
///
/// {
/// let (left, right) = v.split_at(2);
/// assert!(left == [1, 2]);
/// assert!(right == [3, 4, 5, 6]);
/// }
///
/// {
/// let (left, right) = v.split_at(6);
/// assert!(left == [1, 2, 3, 4, 5, 6]);
/// assert!(right == []);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn split_at(&self, mid: usize) -> (&[T], &[T]) {
core_slice::SliceExt::split_at(self, mid)
}
/// Divides one mutable slice into two at an index.
///
/// The first will contain all indices from `[0, mid)` (excluding
/// the index `mid` itself) and the second will contain all
/// indices from `[mid, len)` (excluding the index `len` itself).
///
/// # Panics
///
/// Panics if `mid > len`.
///
/// # Examples
///
/// ```
/// let mut v = [1, 0, 3, 0, 5, 6];
/// // scoped to restrict the lifetime of the borrows
/// {
/// let (left, right) = v.split_at_mut(2);
/// assert!(left == [1, 0]);
/// assert!(right == [3, 0, 5, 6]);
/// left[1] = 2;
/// right[1] = 4;
/// }
/// assert!(v == [1, 2, 3, 4, 5, 6]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn split_at_mut(&mut self, mid: usize) -> (&mut [T], &mut [T]) {
core_slice::SliceExt::split_at_mut(self, mid)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred`. The matched element is not contained in the subslices.
///
/// # Examples
///
/// ```
/// let slice = [10, 40, 33, 20];
/// let mut iter = slice.split(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[10, 40]);
/// assert_eq!(iter.next().unwrap(), &[20]);
/// assert!(iter.next().is_none());
/// ```
///
/// If the first element is matched, an empty slice will be the first item
/// returned by the iterator. Similarly, if the last element in the slice
/// is matched, an empty slice will be the last item returned by the
/// iterator:
///
/// ```
/// let slice = [10, 40, 33];
/// let mut iter = slice.split(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[10, 40]);
/// assert_eq!(iter.next().unwrap(), &[]);
/// assert!(iter.next().is_none());
/// ```
///
/// If two matched elements are directly adjacent, an empty slice will be
/// present between them:
///
/// ```
/// let slice = [10, 6, 33, 20];
/// let mut iter = slice.split(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[10]);
/// assert_eq!(iter.next().unwrap(), &[]);
/// assert_eq!(iter.next().unwrap(), &[20]);
/// assert!(iter.next().is_none());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn split<F>(&self, pred: F) -> Split<T, F>
where F: FnMut(&T) -> bool
{
core_slice::SliceExt::split(self, pred)
}
/// Returns an iterator over mutable subslices separated by elements that
/// match `pred`. The matched element is not contained in the subslices.
///
/// # Examples
///
/// ```
/// let mut v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.split_mut(|num| *num % 3 == 0) {
/// group[0] = 1;
/// }
/// assert_eq!(v, [1, 40, 30, 1, 60, 1]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn split_mut<F>(&mut self, pred: F) -> SplitMut<T, F>
where F: FnMut(&T) -> bool
{
core_slice::SliceExt::split_mut(self, pred)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred`, starting at the end of the slice and working backwards.
/// The matched element is not contained in the subslices.
///
/// # Examples
///
/// ```
/// #![feature(slice_rsplit)]
///
/// let slice = [11, 22, 33, 0, 44, 55];
/// let mut iter = slice.rsplit(|num| *num == 0);
///
/// assert_eq!(iter.next().unwrap(), &[44, 55]);
/// assert_eq!(iter.next().unwrap(), &[11, 22, 33]);
/// assert_eq!(iter.next(), None);
/// ```
///
/// As with `split()`, if the first or last element is matched, an empty
/// slice will be the first (or last) item returned by the iterator.
///
/// ```
/// #![feature(slice_rsplit)]
///
/// let v = &[0, 1, 1, 2, 3, 5, 8];
/// let mut it = v.rsplit(|n| *n % 2 == 0);
/// assert_eq!(it.next().unwrap(), &[]);
/// assert_eq!(it.next().unwrap(), &[3, 5]);
/// assert_eq!(it.next().unwrap(), &[1, 1]);
/// assert_eq!(it.next().unwrap(), &[]);
/// assert_eq!(it.next(), None);
/// ```
#[unstable(feature = "slice_rsplit", issue = "41020")]
#[inline]
pub fn rsplit<F>(&self, pred: F) -> RSplit<T, F>
where F: FnMut(&T) -> bool
{
core_slice::SliceExt::rsplit(self, pred)
}
/// Returns an iterator over mutable subslices separated by elements that
/// match `pred`, starting at the end of the slice and working
/// backwards. The matched element is not contained in the subslices.
///
/// # Examples
///
/// ```
/// #![feature(slice_rsplit)]
///
/// let mut v = [100, 400, 300, 200, 600, 500];
///
/// let mut count = 0;
/// for group in v.rsplit_mut(|num| *num % 3 == 0) {
/// count += 1;
/// group[0] = count;
/// }
/// assert_eq!(v, [3, 400, 300, 2, 600, 1]);
/// ```
///
#[unstable(feature = "slice_rsplit", issue = "41020")]
#[inline]
pub fn rsplit_mut<F>(&mut self, pred: F) -> RSplitMut<T, F>
where F: FnMut(&T) -> bool
{
core_slice::SliceExt::rsplit_mut(self, pred)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred`, limited to returning at most `n` items. The matched element is
/// not contained in the subslices.
///
/// The last element returned, if any, will contain the remainder of the
/// slice.
///
/// # Examples
///
/// Print the slice split once by numbers divisible by 3 (i.e. `[10, 40]`,
/// `[20, 60, 50]`):
///
/// ```
/// let v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.splitn(2, |num| *num % 3 == 0) {
/// println!("{:?}", group);
/// }
/// ```
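///
/// The second group here is the unsplit remainder, even though it still
/// contains a matching element:
///
/// ```
/// let v = [10, 40, 30, 20, 60, 50];
/// let mut iter = v.splitn(2, |num| *num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[10, 40]);
/// assert_eq!(iter.next().unwrap(), &[20, 60, 50]);
/// assert!(iter.next().is_none());
/// ```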
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn splitn<F>(&self, n: usize, pred: F) -> SplitN<T, F>
where F: FnMut(&T) -> bool
{
core_slice::SliceExt::splitn(self, n, pred)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred`, limited to returning at most `n` items. The matched element is
/// not contained in the subslices.
///
/// The last element returned, if any, will contain the remainder of the
/// slice.
///
/// # Examples
///
/// ```
/// let mut v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.splitn_mut(2, |num| *num % 3 == 0) {
/// group[0] = 1;
/// }
/// assert_eq!(v, [1, 40, 30, 1, 60, 50]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn splitn_mut<F>(&mut self, n: usize, pred: F) -> SplitNMut<T, F>
where F: FnMut(&T) -> bool
{
core_slice::SliceExt::splitn_mut(self, n, pred)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred` limited to returning at most `n` items. This starts at the end of
/// the slice and works backwards. The matched element is not contained in
/// the subslices.
///
/// The last element returned, if any, will contain the remainder of the
/// slice.
///
/// # Examples
///
/// Print the slice split once, starting from the end, by numbers divisible
/// by 3 (i.e. `[50]`, `[10, 40, 30, 20]`):
///
/// ```
/// let v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.rsplitn(2, |num| *num % 3 == 0) {
/// println!("{:?}", group);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn rsplitn<F>(&self, n: usize, pred: F) -> RSplitN<T, F>
where F: FnMut(&T) -> bool
{
core_slice::SliceExt::rsplitn(self, n, pred)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred` limited to returning at most `n` items. This starts at the end of
/// the slice and works backwards. The matched element is not contained in
/// the subslices.
///
/// The last element returned, if any, will contain the remainder of the
/// slice.
///
/// # Examples
///
/// ```
/// let mut s = [10, 40, 30, 20, 60, 50];
///
/// for group in s.rsplitn_mut(2, |num| *num % 3 == 0) {
/// group[0] = 1;
/// }
/// assert_eq!(s, [1, 40, 30, 20, 60, 1]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn rsplitn_mut<F>(&mut self, n: usize, pred: F) -> RSplitNMut<T, F>
where F: FnMut(&T) -> bool
{
core_slice::SliceExt::rsplitn_mut(self, n, pred)
}
/// Returns `true` if the slice contains an element with the given value.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert!(v.contains(&30));
/// assert!(!v.contains(&50));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn contains(&self, x: &T) -> bool
where T: PartialEq
{
core_slice::SliceExt::contains(self, x)
}
/// Returns `true` if `needle` is a prefix of the slice.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert!(v.starts_with(&[10]));
/// assert!(v.starts_with(&[10, 40]));
/// assert!(!v.starts_with(&[50]));
/// assert!(!v.starts_with(&[10, 50]));
/// ```
///
/// Always returns `true` if `needle` is an empty slice:
///
/// ```
/// let v = &[10, 40, 30];
/// assert!(v.starts_with(&[]));
/// let v: &[u8] = &[];
/// assert!(v.starts_with(&[]));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn starts_with(&self, needle: &[T]) -> bool
where T: PartialEq
{
core_slice::SliceExt::starts_with(self, needle)
}
/// Returns `true` if `needle` is a suffix of the slice.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert!(v.ends_with(&[30]));
/// assert!(v.ends_with(&[40, 30]));
/// assert!(!v.ends_with(&[50]));
/// assert!(!v.ends_with(&[50, 30]));
/// ```
///
/// Always returns `true` if `needle` is an empty slice:
///
/// ```
/// let v = &[10, 40, 30];
/// assert!(v.ends_with(&[]));
/// let v: &[u8] = &[];
/// assert!(v.ends_with(&[]));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn ends_with(&self, needle: &[T]) -> bool
where T: PartialEq
{
core_slice::SliceExt::ends_with(self, needle)
}
/// Binary searches this sorted slice for a given element.
///
/// If the value is found then `Ok` is returned, containing the
/// index of the matching element; if the value is not found then
/// `Err` is returned, containing the index where a matching
/// element could be inserted while maintaining sorted order.
///
/// # Examples
///
/// Looks up a series of four elements. The first is found, with a
/// uniquely determined position; the second and third are not
/// found; the fourth could match any position in `[1, 4]`.
///
/// ```
/// let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
///
/// assert_eq!(s.binary_search(&13), Ok(9));
/// assert_eq!(s.binary_search(&4), Err(7));
/// assert_eq!(s.binary_search(&100), Err(13));
/// let r = s.binary_search(&1);
/// assert!(match r { Ok(1...4) => true, _ => false, });
/// ```
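///
/// The `Err` value can be used as an insertion point to keep a vector
/// sorted:
///
/// ```
/// let mut v = vec![0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
/// let num = 42;
/// let idx = v.binary_search(&num).unwrap_or_else(|x| x);
/// v.insert(idx, num);
/// assert_eq!(v, [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 42, 55]);
/// ```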
#[stable(feature = "rust1", since = "1.0.0")]
pub fn binary_search(&self, x: &T) -> Result<usize, usize>
where T: Ord
{
core_slice::SliceExt::binary_search(self, x)
}
/// Binary searches this sorted slice with a comparator function.
///
/// The comparator function should implement an order consistent
/// with the sort order of the underlying slice, returning an
/// order code that indicates whether its argument is `Less`,
/// `Equal` or `Greater` than the desired target.
///
/// If a matching value is found then returns `Ok`, containing
/// the index for the matched element; if no match is found then
/// `Err` is returned, containing the index where a matching
/// element could be inserted while maintaining sorted order.
///
/// # Examples
///
/// Looks up a series of four elements. The first is found, with a
/// uniquely determined position; the second and third are not
/// found; the fourth could match any position in `[1, 4]`.
///
/// ```
/// let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
///
/// let seek = 13;
/// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Ok(9));
/// let seek = 4;
/// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(7));
/// let seek = 100;
/// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(13));
/// let seek = 1;
/// let r = s.binary_search_by(|probe| probe.cmp(&seek));
/// assert!(match r { Ok(1...4) => true, _ => false, });
/// ```
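///
/// The comparator can also express a reversed ordering; for example, a
/// slice sorted in descending order can be searched by flipping the
/// comparison:
///
/// ```
/// let s = [55, 34, 21, 13, 8, 5, 3, 2, 1, 1, 0];
///
/// let seek = 13;
/// assert_eq!(s.binary_search_by(|probe| seek.cmp(probe)), Ok(3));
/// ```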
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn binary_search_by<'a, F>(&'a self, f: F) -> Result<usize, usize>
where F: FnMut(&'a T) -> Ordering
{
core_slice::SliceExt::binary_search_by(self, f)
}
/// Binary searches this sorted slice with a key extraction function.
///
/// Assumes that the slice is sorted by the key, for instance with
/// [`sort_by_key`] using the same key extraction function.
///
/// If a matching value is found then returns `Ok`, containing the
/// index for the matched element; if no match is found then `Err`
/// is returned, containing the index where a matching element could
/// be inserted while maintaining sorted order.
///
/// [`sort_by_key`]: #method.sort_by_key
///
/// # Examples
///
/// Looks up a series of four elements in a slice of pairs sorted by
/// their second elements. The first is found, with a uniquely
/// determined position; the second and third are not found; the
/// fourth could match any position in `[1, 4]`.
///
/// ```
/// let s = [(0, 0), (2, 1), (4, 1), (5, 1), (3, 1),
///          (1, 2), (2, 3), (4, 5), (5, 8), (3, 13),
///          (1, 21), (2, 34), (4, 55)];
///
/// assert_eq!(s.binary_search_by_key(&13, |&(a,b)| b), Ok(9));
/// assert_eq!(s.binary_search_by_key(&4, |&(a,b)| b), Err(7));
/// assert_eq!(s.binary_search_by_key(&100, |&(a,b)| b), Err(13));
/// let r = s.binary_search_by_key(&1, |&(a,b)| b);
/// assert!(match r { Ok(1...4) => true, _ => false, });
/// ```
#[stable(feature = "slice_binary_search_by_key", since = "1.10.0")]
#[inline]
pub fn binary_search_by_key<'a, B, F>(&'a self, b: &B, f: F) -> Result<usize, usize>
where F: FnMut(&'a T) -> B,
B: Ord
{
core_slice::SliceExt::binary_search_by_key(self, b, f)
}
/// Sorts the slice.
///
/// This sort is stable (i.e. does not reorder equal elements) and `O(n log n)` worst-case.
///
/// When applicable, unstable sorting is preferred because it is generally faster than stable
/// sorting and it doesn't allocate auxiliary memory.
/// See [`sort_unstable`](#method.sort_unstable).
///
/// # Current implementation
///
/// The current algorithm is an adaptive, iterative merge sort inspired by
/// [timsort](https://en.wikipedia.org/wiki/Timsort).
/// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
/// two or more sorted sequences concatenated one after another.
///
/// Also, it allocates temporary storage half the size of `self`, but for short slices a
/// non-allocating insertion sort is used instead.
///
/// # Examples
///
/// ```
/// let mut v = [-5, 4, 1, -3, 2];
///
/// v.sort();
/// assert!(v == [-5, -3, 1, 2, 4]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn sort(&mut self)
where T: Ord
{
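// Sort using a binary `is_less` predicate (`a.lt(b)`) instead of a full three-way
// `Ordering` comparison; the simpler closure is reportedly easier for the compiler
// to optimize.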
merge_sort(self, |a, b| a.lt(b));
}
/// Sorts the slice with a comparator function.
///
/// This sort is stable (i.e. does not reorder equal elements) and `O(n log n)` worst-case.
///
/// When applicable, unstable sorting is preferred because it is generally faster than stable
/// sorting and it doesn't allocate auxiliary memory.
/// See [`sort_unstable_by`](#method.sort_unstable_by).
///
/// # Current implementation
///
/// The current algorithm is an adaptive, iterative merge sort inspired by
/// [timsort](https://en.wikipedia.org/wiki/Timsort).
/// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
/// two or more sorted sequences concatenated one after another.
///
/// Also, it allocates temporary storage half the size of `self`, but for short slices a
/// non-allocating insertion sort is used instead.
///
/// # Examples
///
/// ```
/// let mut v = [5, 4, 1, 3, 2];
/// v.sort_by(|a, b| a.cmp(b));
/// assert!(v == [1, 2, 3, 4, 5]);
///
/// // reverse sorting
/// v.sort_by(|a, b| b.cmp(a));
/// assert!(v == [5, 4, 3, 2, 1]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn sort_by<F>(&mut self, mut compare: F)
where F: FnMut(&T, &T) -> Ordering
{
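// Reduce the caller's three-way comparator to the binary `is_less` predicate that
// `merge_sort` expects: an element counts as "less" exactly when `compare` returns `Less`.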
merge_sort(self, |a, b| compare(a, b) == Less);
}
/// Sorts the slice with a key extraction function.
///
/// During sorting, the key function is called only once per element.
///
/// This sort is stable (i.e. does not reorder equal elements) and `O(m n + n log n)`
/// worst-case, where the key function is `O(m)`.
///
/// # Current implementation
///
/// The current algorithm is an adaptive, iterative merge sort inspired by
/// [timsort](https://en.wikipedia.org/wiki/Timsort).
/// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
/// two or more sorted sequences concatenated one after another.
///
/// The algorithm allocates temporary storage in a `Vec<(K, usize)>` the length of the slice.
///
/// # Examples
///
/// ```
/// let mut v = [-5i32, 4, 1, -3, 2];
///
/// v.sort_by_key(|k| k.abs());
/// assert!(v == [1, 2, -3, 4, -5]);
/// ```
#[stable(feature = "slice_sort_by_key", since = "1.7.0")]
#[inline]
pub fn sort_by_key<K, F>(&mut self, f: F)
where F: FnMut(&T) -> K, K: Ord
{
let mut indices: Vec<_> = self.iter().map(f).enumerate().map(|(i, k)| (k, i)).collect();
// The elements of `indices` are unique, as they are indexed, so any sort will be stable
// with respect to the original slice. We use `sort_unstable` here because it requires less
// memory allocation.
indices.sort_unstable();
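// Apply the sorted permutation in place. After the sort, `indices[i].1` is the
// original position of the element that belongs at position `i`. If that position
// has already been processed (`index < i`), its former occupant was displaced by an
// earlier swap, so follow the forwarding indices recorded below until the element's
// current position (`>= i`) is found.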
for i in 0..self.len() {
let mut index = indices[i].1;
while index < i {
index = indices[index].1;
}
indices[i].1 = index;
self.swap(i, index);
}
}
/// Sorts the slice, but may not preserve the order of equal elements.
///
/// This sort is unstable (i.e. may reorder equal elements), in-place (i.e. does not allocate),
/// and `O(n log n)` worst-case.
///
/// # Current implementation
///
/// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
/// which combines the fast average case of randomized quicksort with the fast worst case of
/// heapsort, while achieving linear time on slices with certain patterns. It uses some
/// randomization to avoid degenerate cases, but with a fixed seed to always provide
/// deterministic behavior.
///
/// It is typically faster than stable sorting, except in a few special cases, e.g. when the
/// slice consists of several concatenated sorted sequences.
///
/// # Examples
///
/// ```
/// let mut v = [-5, 4, 1, -3, 2];
///
/// v.sort_unstable();
/// assert!(v == [-5, -3, 1, 2, 4]);
/// ```
///
/// [pdqsort]: https://github.com/orlp/pdqsort
#[stable(feature = "sort_unstable", since = "1.20.0")]
#[inline]
pub fn sort_unstable(&mut self)
where T: Ord
{
core_slice::SliceExt::sort_unstable(self);
}
/// Sorts the slice with a comparator function, but may not preserve the order of equal
/// elements.
///
/// This sort is unstable (i.e. may reorder equal elements), in-place (i.e. does not allocate),
/// and `O(n log n)` worst-case.
///
/// # Current implementation
///
/// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
/// which combines the fast average case of randomized quicksort with the fast worst case of
/// heapsort, while achieving linear time on slices with certain patterns. It uses some
/// randomization to avoid degenerate cases, but with a fixed seed to always provide
/// deterministic behavior.
///
/// It is typically faster than stable sorting, except in a few special cases, e.g. when the
/// slice consists of several concatenated sorted sequences.
///
/// # Examples
///
/// ```
/// let mut v = [5, 4, 1, 3, 2];
/// v.sort_unstable_by(|a, b| a.cmp(b));
/// assert!(v == [1, 2, 3, 4, 5]);
///
/// // reverse sorting
/// v.sort_unstable_by(|a, b| b.cmp(a));
/// assert!(v == [5, 4, 3, 2, 1]);
/// ```
///
/// [pdqsort]: https://github.com/orlp/pdqsort
#[stable(feature = "sort_unstable", since = "1.20.0")]
#[inline]
pub fn sort_unstable_by<F>(&mut self, compare: F)
where F: FnMut(&T, &T) -> Ordering
{
core_slice::SliceExt::sort_unstable_by(self, compare);
}
/// Sorts the slice with a key extraction function, but may not preserve the order of equal
/// elements.
///
/// Note that, currently, the key function for [`sort_unstable_by_key`] is called multiple times
/// per element, unlike [`sort_by_key`].
///
/// This sort is unstable (i.e. may reorder equal elements), in-place (i.e. does not allocate),
/// and `O(m n log n)` worst-case, where the key function is `O(m)`.
///
/// # Current implementation
///
/// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
/// which combines the fast average case of randomized quicksort with the fast worst case of
/// heapsort, while achieving linear time on slices with certain patterns. It uses some
/// randomization to avoid degenerate cases, but with a fixed seed to always provide
/// deterministic behavior.
///
/// Due to its key calling strategy, [`sort_unstable_by_key`] is likely to be slower than
/// [`sort_by_key`] in cases where the key function is expensive.
///
/// # Examples
///
/// ```
/// let mut v = [-5i32, 4, 1, -3, 2];
///
/// v.sort_unstable_by_key(|k| k.abs());
/// assert!(v == [1, 2, -3, 4, -5]);
/// ```
///
/// [`sort_by_key`]: #method.sort_by_key
/// [`sort_unstable_by_key`]: #method.sort_unstable_by_key
/// [pdqsort]: https://github.com/orlp/pdqsort
#[stable(feature = "sort_unstable", since = "1.20.0")]
#[inline]
pub fn sort_unstable_by_key<K, F>(&mut self, f: F)
where F: FnMut(&T) -> K, K: Ord
{
core_slice::SliceExt::sort_unstable_by_key(self, f);
}
/// Rotates the slice in-place such that the first `mid` elements of the
/// slice move to the end while the last `self.len() - mid` elements move to
/// the front. After calling `rotate_left`, the element previously at index
/// `mid` will become the first element in the slice.
///
/// # Panics
///
/// This function will panic if `mid` is greater than the length of the
/// slice. Note that `mid == self.len()` does _not_ panic and is a no-op
/// rotation.
///
/// # Complexity
///
/// Takes linear (in `self.len()`) time.
///
/// # Examples
///
/// ```
/// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
/// a.rotate_left(2);
/// assert_eq!(a, ['c', 'd', 'e', 'f', 'a', 'b']);
/// ```
///
/// Rotating a subslice:
///
/// ```
/// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
/// a[1..5].rotate_left(1);
/// assert_eq!(a, ['a', 'c', 'd', 'e', 'b', 'f']);
/// ```
#[stable(feature = "slice_rotate", since = "1.26.0")]
pub fn rotate_left(&mut self, mid: usize) {
core_slice::SliceExt::rotate_left(self, mid);
}
/// Rotates the slice in-place such that the first `self.len() - k`
/// elements of the slice move to the end while the last `k` elements move
/// to the front. After calling `rotate_right`, the element previously at
/// index `self.len() - k` will become the first element in the slice.
///
/// # Panics
///
/// This function will panic if `k` is greater than the length of the
/// slice. Note that `k == self.len()` does _not_ panic and is a no-op
/// rotation.
///
/// # Complexity
///
/// Takes linear (in `self.len()`) time.
///
/// # Examples
///
/// ```
/// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
/// a.rotate_right(2);
/// assert_eq!(a, ['e', 'f', 'a', 'b', 'c', 'd']);
/// ```
///
/// Rotating a subslice:
///
/// ```
/// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
/// a[1..5].rotate_right(1);
/// assert_eq!(a, ['a', 'e', 'b', 'c', 'd', 'f']);
/// ```
#[stable(feature = "slice_rotate", since = "1.26.0")]
pub fn rotate_right(&mut self, k: usize) {
core_slice::SliceExt::rotate_right(self, k);
}
/// Copies the elements from `src` into `self`.
///
/// The length of `src` must be the same as `self`.
///
/// If `src` implements `Copy`, it can be more performant to use
/// [`copy_from_slice`].
///
/// # Panics
///
/// This function will panic if the two slices have different lengths.
///
/// # Examples
///
/// Cloning two elements from a slice into another:
///
/// ```
/// let src = [1, 2, 3, 4];
/// let mut dst = [0, 0];
///
/// dst.clone_from_slice(&src[2..]);
///
/// assert_eq!(src, [1, 2, 3, 4]);
/// assert_eq!(dst, [3, 4]);
/// ```
///
/// Rust enforces that there can only be one mutable reference with no
/// immutable references to a particular piece of data in a particular
/// scope. Because of this, attempting to use `clone_from_slice` on a
/// single slice will result in a compile failure:
///
/// ```compile_fail
/// let mut slice = [1, 2, 3, 4, 5];
///
/// slice[..2].clone_from_slice(&slice[3..]); // compile fail!
/// ```
///
/// To work around this, we can use [`split_at_mut`] to create two distinct
/// sub-slices from a slice:
///
/// ```
/// let mut slice = [1, 2, 3, 4, 5];
///
/// {
/// let (left, right) = slice.split_at_mut(2);
/// left.clone_from_slice(&right[1..]);
/// }
///
/// assert_eq!(slice, [4, 5, 3, 4, 5]);
/// ```
///
/// [`copy_from_slice`]: #method.copy_from_slice
/// [`split_at_mut`]: #method.split_at_mut
#[stable(feature = "clone_from_slice", since = "1.7.0")]
pub fn clone_from_slice(&mut self, src: &[T]) where T: Clone {
core_slice::SliceExt::clone_from_slice(self, src)
}
/// Copies all elements from `src` into `self`, using a memcpy.
///
/// The length of `src` must be the same as `self`.
///
/// If `src` does not implement `Copy`, use [`clone_from_slice`].
///
/// # Panics
///
/// This function will panic if the two slices have different lengths.
///
/// # Examples
///
/// Copying two elements from a slice into another:
///
/// ```
/// let src = [1, 2, 3, 4];
/// let mut dst = [0, 0];
///
/// dst.copy_from_slice(&src[2..]);
///
/// assert_eq!(src, [1, 2, 3, 4]);
/// assert_eq!(dst, [3, 4]);
/// ```
///
/// Rust enforces that there can only be one mutable reference with no
/// immutable references to a particular piece of data in a particular
/// scope. Because of this, attempting to use `copy_from_slice` on a
/// single slice will result in a compile failure:
///
/// ```compile_fail
/// let mut slice = [1, 2, 3, 4, 5];
///
/// slice[..2].copy_from_slice(&slice[3..]); // compile fail!
/// ```
///
/// To work around this, we can use [`split_at_mut`] to create two distinct
/// sub-slices from a slice:
///
/// ```
/// let mut slice = [1, 2, 3, 4, 5];
///
/// {
/// let (left, right) = slice.split_at_mut(2);
/// left.copy_from_slice(&right[1..]);
/// }
///
/// assert_eq!(slice, [4, 5, 3, 4, 5]);
/// ```
///
/// [`clone_from_slice`]: #method.clone_from_slice
/// [`split_at_mut`]: #method.split_at_mut
#[stable(feature = "copy_from_slice", since = "1.9.0")]
pub fn copy_from_slice(&mut self, src: &[T]) where T: Copy {
core_slice::SliceExt::copy_from_slice(self, src)
}
/// Swaps all elements in `self` with those in `other`.
///
/// The length of `other` must be the same as `self`.
///
/// # Panics
///
/// This function will panic if the two slices have different lengths.
///
/// # Example
///
/// Swapping two elements across slices:
///
/// ```
/// #![feature(swap_with_slice)]
///
/// let mut slice1 = [0, 0];
/// let mut slice2 = [1, 2, 3, 4];
///
/// slice1.swap_with_slice(&mut slice2[2..]);
///
/// assert_eq!(slice1, [3, 4]);
/// assert_eq!(slice2, [1, 2, 0, 0]);
/// ```
///
/// Rust enforces that there can only be one mutable reference to a
/// particular piece of data in a particular scope. Because of this,
/// attempting to use `swap_with_slice` on a single slice will result in
/// a compile failure:
///
/// ```compile_fail
/// #![feature(swap_with_slice)]
///
/// let mut slice = [1, 2, 3, 4, 5];
/// slice[..2].swap_with_slice(&mut slice[3..]); // compile fail!
/// ```
///
/// To work around this, we can use [`split_at_mut`] to create two distinct
/// mutable sub-slices from a slice:
///
/// ```
/// #![feature(swap_with_slice)]
///
/// let mut slice = [1, 2, 3, 4, 5];
///
/// {
/// let (left, right) = slice.split_at_mut(2);
/// left.swap_with_slice(&mut right[1..]);
/// }
///
/// assert_eq!(slice, [4, 5, 3, 1, 2]);
/// ```
///
/// [`split_at_mut`]: #method.split_at_mut
#[unstable(feature = "swap_with_slice", issue = "44030")]
pub fn swap_with_slice(&mut self, other: &mut [T]) {
core_slice::SliceExt::swap_with_slice(self, other)
}
/// Copies `self` into a new `Vec`.
///
/// # Examples
///
/// ```
/// let s = [10, 40, 30];
/// let x = s.to_vec();
/// // Here, `s` and `x` can be modified independently.
/// ```
#[rustc_conversion_suggestion]
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn to_vec(&self) -> Vec<T>
where T: Clone
{
// NB see hack module in this file
hack::to_vec(self)
}
/// Converts `self` into a vector without clones or allocation.
///
/// The resulting vector can be converted back into a box via
/// `Vec<T>`'s `into_boxed_slice` method.
///
/// # Examples
///
/// ```
/// let s: Box<[i32]> = Box::new([10, 40, 30]);
/// let x = s.into_vec();
/// // `s` cannot be used anymore because it has been converted into `x`.
///
/// assert_eq!(x, vec![10, 40, 30]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn into_vec(self: Box<Self>) -> Vec<T> {
// NB see hack module in this file
hack::into_vec(self)
}
}
#[lang = "slice_u8"]
#[cfg(not(test))]
impl [u8] {
/// Checks if all bytes in this slice are within the ASCII range.
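///
/// # Examples
///
/// A minimal usage sketch:
///
/// ```
/// assert!(b"hello!\n".is_ascii());
/// assert!(!"Grüße".as_bytes().is_ascii());
/// ```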
#[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
#[inline]
pub fn is_ascii(&self) -> bool {
self.iter().all(|b| b.is_ascii())
}
/// Returns a vector containing a copy of this slice where each byte
/// is mapped to its ASCII upper case equivalent.
///
/// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
/// but non-ASCII letters are unchanged.
///
/// To uppercase the value in-place, use [`make_ascii_uppercase`].
///
/// [`make_ascii_uppercase`]: #method.make_ascii_uppercase
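///
/// # Examples
///
/// A minimal usage sketch:
///
/// ```
/// let bytes = b"hello, world!";
/// assert_eq!(bytes.to_ascii_uppercase(), b"HELLO, WORLD!".to_vec());
/// ```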
#[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
#[inline]
pub fn to_ascii_uppercase(&self) -> Vec<u8> {
let mut me = self.to_vec();
me.make_ascii_uppercase();
me
}
/// Returns a vector containing a copy of this slice where each byte
/// is mapped to its ASCII lower case equivalent.
///
/// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
/// but non-ASCII letters are unchanged.
///
/// To lowercase the value in-place, use [`make_ascii_lowercase`].
///
/// [`make_ascii_lowercase`]: #method.make_ascii_lowercase
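///
/// # Examples
///
/// A minimal usage sketch:
///
/// ```
/// let bytes = b"HELLO, World!";
/// assert_eq!(bytes.to_ascii_lowercase(), b"hello, world!".to_vec());
/// ```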
#[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
#[inline]
pub fn to_ascii_lowercase(&self) -> Vec<u8> {
let mut me = self.to_vec();
me.make_ascii_lowercase();
me
}
/// Checks that two slices are an ASCII case-insensitive match.
///
/// Equivalent to `a.to_ascii_lowercase() == b.to_ascii_lowercase()`,
/// but without allocating and copying temporaries.
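///
/// # Examples
///
/// A minimal usage sketch:
///
/// ```
/// assert!(b"Ferris".eq_ignore_ascii_case(b"FERRIS"));
/// assert!(!b"Ferris".eq_ignore_ascii_case(b"FERRUM"));
/// ```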
#[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
#[inline]
pub fn eq_ignore_ascii_case(&self, other: &[u8]) -> bool {
self.len() == other.len() &&
self.iter().zip(other).all(|(a, b)| {
a.eq_ignore_ascii_case(b)
})
}
/// Converts this slice to its ASCII upper case equivalent in-place.
///
/// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
/// but non-ASCII letters are unchanged.
///
/// To return a new uppercased value without modifying the existing one, use
/// [`to_ascii_uppercase`].
///
/// [`to_ascii_uppercase`]: #method.to_ascii_uppercase
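///
/// # Examples
///
/// A minimal usage sketch:
///
/// ```
/// let mut bytes = b"hello".to_vec();
/// bytes.make_ascii_uppercase();
/// assert_eq!(bytes, b"HELLO".to_vec());
/// ```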
#[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
#[inline]
pub fn make_ascii_uppercase(&mut self) {
for byte in self {
byte.make_ascii_uppercase();
}
}
/// Converts this slice to its ASCII lower case equivalent in-place.
///
/// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
/// but non-ASCII letters are unchanged.
///
/// To return a new lowercased value without modifying the existing one, use
/// [`to_ascii_lowercase`].
///
/// [`to_ascii_lowercase`]: #method.to_ascii_lowercase
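///
/// # Examples
///
/// A minimal usage sketch:
///
/// ```
/// let mut bytes = b"HELLO".to_vec();
/// bytes.make_ascii_lowercase();
/// assert_eq!(bytes, b"hello".to_vec());
/// ```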
#[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
#[inline]
pub fn make_ascii_lowercase(&mut self) {
for byte in self {
byte.make_ascii_lowercase();
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Extension traits for slices over specific kinds of data
////////////////////////////////////////////////////////////////////////////////
#[unstable(feature = "slice_concat_ext",
reason = "trait should not have to exist",
issue = "27747")]
/// An extension trait for concatenating slices.
///
/// While this trait is unstable, the methods are stable. `SliceConcatExt` is
/// included in the [standard library prelude], so you can use [`join()`] and
/// [`concat()`] as if they existed on `[T]` itself.
///
/// [standard library prelude]: ../../std/prelude/index.html
/// [`join()`]: #tymethod.join
/// [`concat()`]: #tymethod.concat
pub trait SliceConcatExt<T: ?Sized> {
#[unstable(feature = "slice_concat_ext",
reason = "trait should not have to exist",
issue = "27747")]
/// The resulting type after concatenation.
type Output;
/// Flattens a slice of `T` into a single value `Self::Output`.
///
/// # Examples
///
/// ```
/// assert_eq!(["hello", "world"].concat(), "helloworld");
/// assert_eq!([[1, 2], [3, 4]].concat(), [1, 2, 3, 4]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn concat(&self) -> Self::Output;
/// Flattens a slice of `T` into a single value `Self::Output`, placing a
/// given separator between each.
///
/// # Examples
///
/// ```
/// assert_eq!(["hello", "world"].join(" "), "hello world");
/// assert_eq!([[1, 2], [3, 4]].join(&0), [1, 2, 0, 3, 4]);
/// ```
#[stable(feature = "rename_connect_to_join", since = "1.3.0")]
fn join(&self, sep: &T) -> Self::Output;
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_deprecated(since = "1.3.0", reason = "renamed to join")]
fn connect(&self, sep: &T) -> Self::Output;
}
#[unstable(feature = "slice_concat_ext",
reason = "trait should not have to exist",
issue = "27747")]
impl<T: Clone, V: Borrow<[T]>> SliceConcatExt<T> for [V] {
type Output = Vec<T>;
fn concat(&self) -> Vec<T> {
let size = self.iter().fold(0, |acc, v| acc + v.borrow().len());
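// `size` is the exact number of elements in the concatenation, so `result` never needs
// to reallocate while it is being filled below.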
let mut result = Vec::with_capacity(size);
for v in self {
result.extend_from_slice(v.borrow())
}
result
}
fn join(&self, sep: &T) -> Vec<T> {
let size = self.iter().fold(0, |acc, v| acc + v.borrow().len());
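// Reserve room for every element plus one separator slot per slice. Only `self.len() - 1`
// separators are actually inserted, so this over-reserves by one slot, which is harmless.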
let mut result = Vec::with_capacity(size + self.len());
let mut first = true;
for v in self {
if first {
first = false
} else {
result.push(sep.clone())
}
result.extend_from_slice(v.borrow())
}
result
}
fn connect(&self, sep: &T) -> Vec<T> {
self.join(sep)
}
}
////////////////////////////////////////////////////////////////////////////////
// Standard trait implementations for slices
////////////////////////////////////////////////////////////////////////////////
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Borrow<[T]> for Vec<T> {
fn borrow(&self) -> &[T] {
&self[..]
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> BorrowMut<[T]> for Vec<T> {
fn borrow_mut(&mut self) -> &mut [T] {
&mut self[..]
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone> ToOwned for [T] {
type Owned = Vec<T>;
#[cfg(not(test))]
fn to_owned(&self) -> Vec<T> {
self.to_vec()
}
#[cfg(test)]
fn to_owned(&self) -> Vec<T> {
hack::to_vec(self)
}
fn clone_into(&self, target: &mut Vec<T>) {
// drop anything in target that will not be overwritten
target.truncate(self.len());
let len = target.len();
// reuse the contained values' allocations/resources.
target.clone_from_slice(&self[..len]);
// target.len <= self.len due to the truncate above, so the
// slice here is always in-bounds.
target.extend_from_slice(&self[len..]);
}
}
////////////////////////////////////////////////////////////////////////////////
// Sorting
////////////////////////////////////////////////////////////////////////////////
/// Inserts `v[0]` into the pre-sorted sequence `v[1..]` so that the whole `v[..]` becomes sorted.
///
/// This is the integral subroutine of insertion sort.
fn insert_head<T, F>(v: &mut [T], is_less: &mut F)
where F: FnMut(&T, &T) -> bool
{
if v.len() >= 2 && is_less(&v[1], &v[0]) {
unsafe {
// There are three ways to implement insertion here:
//
// 1. Swap adjacent elements until the first one gets to its final destination.
// However, this way we copy data around more than is necessary. If elements are big
// structures (costly to copy), this method will be slow.
//
// 2. Iterate until the right place for the first element is found. Then shift the
// elements succeeding it to make room for it and finally place it into the
// remaining hole. This is a good method.
//
// 3. Copy the first element into a temporary variable. Iterate until the right place
// for it is found. As we go along, copy every traversed element into the slot
// preceding it. Finally, copy data from the temporary variable into the remaining
// hole. This method is very good. Benchmarks demonstrated slightly better
// performance than with the 2nd method.
//
// All methods were benchmarked, and the 3rd showed best results. So we chose that one.
let mut tmp = mem::ManuallyDrop::new(ptr::read(&v[0]));
// Intermediate state of the insertion process is always tracked by `hole`, which
// serves two purposes:
// 1. Protects integrity of `v` from panics in `is_less`.
// 2. Fills the remaining hole in `v` in the end.
//
// Panic safety:
//
// If `is_less` panics at any point during the process, `hole` will get dropped and
// fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it
// initially held exactly once.
let mut hole = InsertionHole {
src: &mut *tmp,
dest: &mut v[1],
};
ptr::copy_nonoverlapping(&v[1], &mut v[0], 1);
for i in 2..v.len() {
if !is_less(&v[i], &*tmp) {
break;
}
ptr::copy_nonoverlapping(&v[i], &mut v[i - 1], 1);
hole.dest = &mut v[i];
}
// `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
}
}
// When dropped, copies from `src` into `dest`.
struct InsertionHole<T> {
src: *mut T,
dest: *mut T,
}
impl<T> Drop for InsertionHole<T> {
fn drop(&mut self) {
unsafe { ptr::copy_nonoverlapping(self.src, self.dest, 1); }
}
}
}
/// Merges non-decreasing runs `v[..mid]` and `v[mid..]` using `buf` as temporary storage, and
/// stores the result into `v[..]`.
///
/// # Safety
///
/// The two slices must be non-empty and `mid` must be in bounds. Buffer `buf` must be long enough
/// to hold a copy of the shorter slice. Also, `T` must not be a zero-sized type.
unsafe fn merge<T, F>(v: &mut [T], mid: usize, buf: *mut T, is_less: &mut F)
where F: FnMut(&T, &T) -> bool
{
let len = v.len();
let v = v.as_mut_ptr();
let v_mid = v.offset(mid as isize);
let v_end = v.offset(len as isize);
// The merge process first copies the shorter run into `buf`. Then it traces the newly copied
// run and the longer run forwards (or backwards), comparing their next unconsumed elements and
// copying the lesser (or greater) one into `v`.
//
// As soon as the shorter run is fully consumed, the process is done. If the longer run gets
// consumed first, then we must copy whatever is left of the shorter run into the remaining
// hole in `v`.
//
// Intermediate state of the process is always tracked by `hole`, which serves two purposes:
// 1. Protects integrity of `v` from panics in `is_less`.
// 2. Fills the remaining hole in `v` if the longer run gets consumed first.
//
// Panic safety:
//
// If `is_less` panics at any point during the process, `hole` will get dropped and fill the
// hole in `v` with the unconsumed range in `buf`, thus ensuring that `v` still holds every
// object it initially held exactly once.
let mut hole;
if mid <= len - mid {
// The left run is shorter.
ptr::copy_nonoverlapping(v, buf, mid);
hole = MergeHole {
start: buf,
end: buf.offset(mid as isize),
dest: v,
};
// Initially, these pointers point to the beginnings of their arrays.
let left = &mut hole.start;
let mut right = v_mid;
let out = &mut hole.dest;
while *left < hole.end && right < v_end {
// Consume the lesser side.
// If equal, prefer the left run to maintain stability.
let to_copy = if is_less(&*right, &**left) {
get_and_increment(&mut right)
} else {
get_and_increment(left)
};
ptr::copy_nonoverlapping(to_copy, get_and_increment(out), 1);
}
} else {
// The right run is shorter.
ptr::copy_nonoverlapping(v_mid, buf, len - mid);
hole = MergeHole {
start: buf,
end: buf.offset((len - mid) as isize),
dest: v_mid,
};
// Initially, these pointers point past the ends of their arrays.
let left = &mut hole.dest;
let right = &mut hole.end;
let mut out = v_end;
while v < *left && buf < *right {
// Consume the greater side.
// If equal, prefer the right run to maintain stability.
let to_copy = if is_less(&*right.offset(-1), &*left.offset(-1)) {
decrement_and_get(left)
} else {
decrement_and_get(right)
};
ptr::copy_nonoverlapping(to_copy, decrement_and_get(&mut out), 1);
}
}
// Finally, `hole` gets dropped. If the shorter run was not fully consumed, whatever remains of
// it will now be copied into the hole in `v`.
unsafe fn get_and_increment<T>(ptr: &mut *mut T) -> *mut T {
let old = *ptr;
*ptr = ptr.offset(1);
old
}
unsafe fn decrement_and_get<T>(ptr: &mut *mut T) -> *mut T {
*ptr = ptr.offset(-1);
*ptr
}
// When dropped, copies the range `start..end` into `dest..`.
struct MergeHole<T> {
start: *mut T,
end: *mut T,
dest: *mut T,
}
impl<T> Drop for MergeHole<T> {
fn drop(&mut self) {
// `T` is not a zero-sized type, so it's okay to divide by its size.
let len = (self.end as usize - self.start as usize) / mem::size_of::<T>();
unsafe { ptr::copy_nonoverlapping(self.start, self.dest, len); }
}
}
}
/// This merge sort borrows some (but not all) ideas from TimSort, which is described in detail
/// [here](http://svn.python.org/projects/python/trunk/Objects/listsort.txt).
///
/// The algorithm identifies strictly descending and non-descending subsequences, which are called
/// natural runs. There is a stack of pending runs yet to be merged. Each newly found run is pushed
/// onto the stack, and then some pairs of adjacent runs are merged until these two invariants are
/// satisfied:
///
/// 1. for every `i` in `1..runs.len()`: `runs[i - 1].len > runs[i].len`
/// 2. for every `i` in `2..runs.len()`: `runs[i - 2].len > runs[i - 1].len + runs[i].len`
///
/// The invariants ensure that the total running time is `O(n log n)` worst-case.
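///
/// As an illustration of how the invariants drive merging (run lengths only, top of the
/// stack on the right):
///
/// ```text
/// stack:   [10, 5]      both invariants hold
/// push 6:  [10, 5, 6]   5 <= 6 violates invariant 1, so merge the two topmost runs
/// merge:   [10, 11]     10 <= 11 still violates invariant 1, so merge again
/// merge:   [21]         invariants hold
/// ```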
fn merge_sort<T, F>(v: &mut [T], mut is_less: F)
where F: FnMut(&T, &T) -> bool
{
// Slices of up to this length get sorted using insertion sort.
const MAX_INSERTION: usize = 20;
// Very short runs are extended using insertion sort to span at least this many elements.
const MIN_RUN: usize = 10;
// Sorting has no meaningful behavior on zero-sized types.
if size_of::<T>() == 0 {
return;
}
let len = v.len();
// Short arrays get sorted in-place via insertion sort to avoid allocations.
if len <= MAX_INSERTION {
if len >= 2 {
for i in (0..len-1).rev() {
insert_head(&mut v[i..], &mut is_less);
}
}
return;
}
// Allocate a buffer to use as scratch memory. We keep the length 0 so we can keep in it
// shallow copies of the contents of `v` without risking the dtors running on copies if
// `is_less` panics. When merging two sorted runs, this buffer holds a copy of the shorter run,
// which will always have length at most `len / 2`.
let mut buf = Vec::with_capacity(len / 2);
// In order to identify natural runs in `v`, we traverse it backwards. That might seem like a
// strange decision, but consider the fact that merges more often go in the opposite direction
// (forwards). According to benchmarks, merging forwards is slightly faster than merging
// backwards. To conclude, identifying runs by traversing backwards improves performance.
let mut runs = vec![];
let mut end = len;
while end > 0 {
// Find the next natural run, and reverse it if it's strictly descending.
let mut start = end - 1;
if start > 0 {
start -= 1;
unsafe {
if is_less(v.get_unchecked(start + 1), v.get_unchecked(start)) {
while start > 0 && is_less(v.get_unchecked(start),
v.get_unchecked(start - 1)) {
start -= 1;
}
v[start..end].reverse();
} else {
while start > 0 && !is_less(v.get_unchecked(start),
v.get_unchecked(start - 1)) {
start -= 1;
}
}
}
}
// Insert some more elements into the run if it's too short. Insertion sort is faster than
// merge sort on short sequences, so this significantly improves performance.
while start > 0 && end - start < MIN_RUN {
start -= 1;
insert_head(&mut v[start..end], &mut is_less);
}
// Push this run onto the stack.
runs.push(Run {
start,
len: end - start,
});
end = start;
// Merge some pairs of adjacent runs to satisfy the invariants.
while let Some(r) = collapse(&runs) {
let left = runs[r + 1];
let right = runs[r];
unsafe {
merge(&mut v[left.start .. right.start + right.len], left.len, buf.as_mut_ptr(),
&mut is_less);
}
runs[r] = Run {
start: left.start,
len: left.len + right.len,
};
runs.remove(r + 1);
}
}
// Finally, exactly one run must remain in the stack.
debug_assert!(runs.len() == 1 && runs[0].start == 0 && runs[0].len == len);
// Examines the stack of runs and identifies the next pair of runs to merge. More specifically,
// if `Some(r)` is returned, that means `runs[r]` and `runs[r + 1]` must be merged next. If the
// algorithm should continue building a new run instead, `None` is returned.
//
// TimSort is infamous for its buggy implementations, as described here:
// http://envisage-project.eu/timsort-specification-and-verification/
//
// The gist of the story is: we must enforce the invariants on the top four runs on the stack.
// Enforcing them on just top three is not sufficient to ensure that the invariants will still
// hold for *all* runs in the stack.
//
// This function correctly checks invariants for the top four runs. Additionally, if the top
// run starts at index 0, it will always demand a merge operation until the stack is fully
// collapsed, in order to complete the sort.
#[inline]
fn collapse(runs: &[Run]) -> Option<usize> {
let n = runs.len();
if n >= 2 && (runs[n - 1].start == 0 ||
runs[n - 2].len <= runs[n - 1].len ||
(n >= 3 && runs[n - 3].len <= runs[n - 2].len + runs[n - 1].len) ||
(n >= 4 && runs[n - 4].len <= runs[n - 3].len + runs[n - 2].len)) {
if n >= 3 && runs[n - 3].len < runs[n - 1].len {
Some(n - 3)
} else {
Some(n - 2)
}
} else {
None
}
}
#[derive(Clone, Copy)]
struct Run {
start: usize,
len: usize,
}
}