#![allow(invalid_from_utf8)]
use std::assert_matches::assert_matches;
use std::borrow::Cow;
use std::cmp::Ordering::{Equal, Greater, Less};
use std::str::{from_utf8, from_utf8_unchecked};
#[test]
fn test_le() {
assert!("" <= "");
assert!("" <= "foo");
assert!("foo" <= "foo");
assert_ne!("foo", "bar");
}
#[test]
fn test_find() {
assert_eq!("hello".find('l'), Some(2));
assert_eq!("hello".find(|c: char| c == 'o'), Some(4));
assert!("hello".find('x').is_none());
assert!("hello".find(|c: char| c == 'x').is_none());
assert_eq!("ประเทศไทย中华Việt Nam".find('华'), Some(30));
assert_eq!("ประเทศไทย中华Việt Nam".find(|c: char| c == '华'), Some(30));
}
#[test]
fn test_rfind() {
assert_eq!("hello".rfind('l'), Some(3));
assert_eq!("hello".rfind(|c: char| c == 'o'), Some(4));
assert!("hello".rfind('x').is_none());
assert!("hello".rfind(|c: char| c == 'x').is_none());
assert_eq!("ประเทศไทย中华Việt Nam".rfind('华'), Some(30));
assert_eq!("ประเทศไทย中华Việt Nam".rfind(|c: char| c == '华'), Some(30));
}
#[test]
fn test_collect() {
let empty = "";
let s: String = empty.chars().collect();
assert_eq!(empty, s);
let data = "ประเทศไทย中";
let s: String = data.chars().collect();
assert_eq!(data, s);
}
#[test]
fn test_into_bytes() {
let data = String::from("asdf");
let buf = data.into_bytes();
assert_eq!(buf, b"asdf");
}
#[test]
|
|
|
|
|
fn test_find_str() {
|
|
|
|
|
// byte positions
|
2015-03-30 18:00:05 +00:00
|
|
|
|
assert_eq!("".find(""), Some(0));
|
|
|
|
|
assert!("banana".find("apple pie").is_none());
|
2015-03-11 04:58:16 +00:00
|
|
|
|
|
|
|
|
|
let data = "abcabc";
|
2015-03-30 18:00:05 +00:00
|
|
|
|
assert_eq!(data[0..6].find("ab"), Some(0));
|
|
|
|
|
assert_eq!(data[2..6].find("ab"), Some(3 - 2));
|
|
|
|
|
assert!(data[2..4].find("ab").is_none());
|
2015-03-11 04:58:16 +00:00
|
|
|
|
|
|
|
|
|
let string = "ประเทศไทย中华Việt Nam";
|
2015-06-08 14:55:35 +00:00
|
|
|
|
let mut data = String::from(string);
|
2015-03-11 04:58:16 +00:00
|
|
|
|
data.push_str(string);
|
2015-03-30 18:00:05 +00:00
|
|
|
|
assert!(data.find("ไท华").is_none());
|
|
|
|
|
assert_eq!(data[0..43].find(""), Some(0));
|
|
|
|
|
assert_eq!(data[6..43].find(""), Some(6 - 6));
|
2015-03-11 04:58:16 +00:00
|
|
|
|
|
2015-03-30 18:00:05 +00:00
|
|
|
|
assert_eq!(data[0..43].find("ประ"), Some(0));
|
|
|
|
|
assert_eq!(data[0..43].find("ทศไ"), Some(12));
|
|
|
|
|
assert_eq!(data[0..43].find("ย中"), Some(24));
|
|
|
|
|
assert_eq!(data[0..43].find("iệt"), Some(34));
|
|
|
|
|
assert_eq!(data[0..43].find("Nam"), Some(40));
|
2015-03-11 04:58:16 +00:00
|
|
|
|
|
2015-03-30 18:00:05 +00:00
|
|
|
|
assert_eq!(data[43..86].find("ประ"), Some(43 - 43));
|
|
|
|
|
assert_eq!(data[43..86].find("ทศไ"), Some(55 - 43));
|
|
|
|
|
assert_eq!(data[43..86].find("ย中"), Some(67 - 43));
|
|
|
|
|
assert_eq!(data[43..86].find("iệt"), Some(77 - 43));
|
|
|
|
|
assert_eq!(data[43..86].find("Nam"), Some(83 - 43));
|
2015-08-02 17:03:01 +00:00
|
|
|
|
|
2015-10-07 22:11:25 +00:00
|
|
|
|
// find every substring -- assert that it finds it, or an earlier occurrence.
|
2015-08-02 17:03:01 +00:00
|
|
|
|
let string = "Việt Namacbaabcaabaaba";
|
|
|
|
|
for (i, ci) in string.char_indices() {
|
|
|
|
|
let ip = i + ci.len_utf8();
|
|
|
|
|
for j in string[ip..].char_indices().map(|(i, _)| i).chain(Some(string.len() - ip)) {
|
|
|
|
|
let pat = &string[i..ip + j];
|
|
|
|
|
assert!(match string.find(pat) {
|
|
|
|
|
None => false,
|
|
|
|
|
Some(x) => x <= i,
|
|
|
|
|
});
|
|
|
|
|
assert!(match string.rfind(pat) {
|
|
|
|
|
None => false,
|
|
|
|
|
Some(x) => x >= i,
|
|
|
|
|
});
|
|
|
|
|
}
|
|
|
|
|
}
|
2015-03-11 04:58:16 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
fn s(x: &str) -> String {
|
|
|
|
|
x.to_string()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
macro_rules! test_concat {
|
|
|
|
|
($expected: expr, $string: expr) => {{
|
|
|
|
|
let s: String = $string.concat();
|
|
|
|
|
assert_eq!($expected, s);
|
|
|
|
|
}};
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
|
fn test_concat_for_different_types() {
|
|
|
|
|
test_concat!("ab", vec![s("a"), s("b")]);
|
|
|
|
|
test_concat!("ab", vec!["a", "b"]);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
|
fn test_concat_for_different_lengths() {
|
|
|
|
|
let empty: &[&str] = &[];
|
|
|
|
|
test_concat!("", empty);
|
|
|
|
|
test_concat!("a", ["a"]);
|
|
|
|
|
test_concat!("ab", ["a", "b"]);
|
|
|
|
|
test_concat!("abc", ["", "a", "bc"]);
|
|
|
|
|
}
|
|
|
|
|
|
2015-07-10 12:19:21 +00:00
|
|
|
|
macro_rules! test_join {
|
2015-03-11 04:58:16 +00:00
|
|
|
|
($expected: expr, $string: expr, $delim: expr) => {{
|
2015-07-10 12:19:21 +00:00
|
|
|
|
let s = $string.join($delim);
|
2015-03-11 04:58:16 +00:00
|
|
|
|
assert_eq!($expected, s);
|
|
|
|
|
}};
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
2015-07-10 12:19:21 +00:00
|
|
|
|
fn test_join_for_different_types() {
|
|
|
|
|
test_join!("a-b", ["a", "b"], "-");
|
2015-03-11 04:58:16 +00:00
|
|
|
|
let hyphen = "-".to_string();
|
2015-07-10 12:19:21 +00:00
|
|
|
|
test_join!("a-b", [s("a"), s("b")], &*hyphen);
|
|
|
|
|
test_join!("a-b", vec!["a", "b"], &*hyphen);
|
|
|
|
|
test_join!("a-b", &*vec!["a", "b"], "-");
|
|
|
|
|
test_join!("a-b", vec![s("a"), s("b")], "-");
|
2015-03-11 04:58:16 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
2015-07-10 12:19:21 +00:00
|
|
|
|
fn test_join_for_different_lengths() {
|
2015-03-11 04:58:16 +00:00
|
|
|
|
let empty: &[&str] = &[];
|
2015-07-10 12:19:21 +00:00
|
|
|
|
test_join!("", empty, "-");
|
|
|
|
|
test_join!("a", ["a"], "-");
|
|
|
|
|
test_join!("a-b", ["a", "b"], "-");
|
|
|
|
|
test_join!("-a-bc", ["", "a", "bc"], "-");
|
2015-03-11 04:58:16 +00:00
|
|
|
|
}
|
|
|
|
|
|
2018-05-07 15:37:13 +00:00
|
|
|
|
// join has fast paths for small separators up to 4 bytes
|
|
|
|
|
// this tests the slow paths.
|
|
|
|
|
#[test]
|
|
|
|
|
fn test_join_for_different_lengths_with_long_separator() {
|
|
|
|
|
assert_eq!("～～～～～".len(), 15);
|
|
|
|
|
|
|
|
|
|
let empty: &[&str] = &[];
|
|
|
|
|
test_join!("", empty, "～～～～～");
|
|
|
|
|
test_join!("a", ["a"], "～～～～～");
|
|
|
|
|
test_join!("a～～～～～b", ["a", "b"], "～～～～～");
|
|
|
|
|
test_join!("～～～～～a～～～～～bc", ["", "a", "bc"], "～～～～～");
|
|
|
|
|
}
|
|
|
|
|
|
2021-02-03 21:36:33 +00:00
|
|
|
|
#[test]
|
2021-12-14 14:23:34 +00:00
|
|
|
|
fn test_join_issue_80335() {
|
2021-02-03 21:36:33 +00:00
|
|
|
|
use core::{borrow::Borrow, cell::Cell};
|
|
|
|
|
|
|
|
|
|
struct WeirdBorrow {
|
|
|
|
|
state: Cell<bool>,
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
impl Default for WeirdBorrow {
|
|
|
|
|
fn default() -> Self {
|
|
|
|
|
WeirdBorrow { state: Cell::new(false) }
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
impl Borrow<str> for WeirdBorrow {
|
|
|
|
|
fn borrow(&self) -> &str {
|
|
|
|
|
let state = self.state.get();
|
|
|
|
|
if state {
|
|
|
|
|
"0"
|
|
|
|
|
} else {
|
|
|
|
|
self.state.set(true);
|
|
|
|
|
"123456"
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
let arr: [WeirdBorrow; 3] = Default::default();
|
|
|
|
|
test_join!("0-0-0", arr, "-");
|
|
|
|
|
}
|
|
|
|
|
|
2015-03-11 04:58:16 +00:00
|
|
|
|
#[test]
|
2019-12-07 11:42:19 +00:00
|
|
|
|
#[cfg_attr(miri, ignore)] // Miri is too slow
|
2015-03-11 04:58:16 +00:00
|
|
|
|
fn test_unsafe_slice() {
|
2018-06-26 12:33:57 +00:00
|
|
|
|
assert_eq!("ab", unsafe { "abc".get_unchecked(0..2) });
|
|
|
|
|
assert_eq!("bc", unsafe { "abc".get_unchecked(1..3) });
|
|
|
|
|
assert_eq!("", unsafe { "abc".get_unchecked(1..1) });
|
2015-03-11 04:58:16 +00:00
|
|
|
|
fn a_million_letter_a() -> String {
|
|
|
|
|
let mut i = 0;
|
|
|
|
|
let mut rs = String::new();
|
|
|
|
|
while i < 100000 {
|
|
|
|
|
rs.push_str("aaaaaaaaaa");
|
|
|
|
|
i += 1;
|
|
|
|
|
}
|
|
|
|
|
rs
|
|
|
|
|
}
|
|
|
|
|
fn half_a_million_letter_a() -> String {
|
|
|
|
|
let mut i = 0;
|
|
|
|
|
let mut rs = String::new();
|
|
|
|
|
while i < 100000 {
|
|
|
|
|
rs.push_str("aaaaa");
|
|
|
|
|
i += 1;
|
|
|
|
|
}
|
|
|
|
|
rs
|
|
|
|
|
}
|
|
|
|
|
let letters = a_million_letter_a();
|
2018-06-26 12:33:57 +00:00
|
|
|
|
assert_eq!(half_a_million_letter_a(), unsafe { letters.get_unchecked(0..500000) });
|
2015-03-11 04:58:16 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
|
fn test_starts_with() {
|
2016-06-09 02:50:08 +00:00
|
|
|
|
assert!("".starts_with(""));
|
|
|
|
|
assert!("abc".starts_with(""));
|
|
|
|
|
assert!("abc".starts_with("a"));
|
|
|
|
|
assert!(!"a".starts_with("abc"));
|
|
|
|
|
assert!(!"".starts_with("abc"));
|
|
|
|
|
assert!(!"ödd".starts_with("-"));
|
|
|
|
|
assert!("ödd".starts_with("öd"));
|
2015-03-11 04:58:16 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
|
fn test_ends_with() {
|
2016-06-09 02:50:08 +00:00
|
|
|
|
assert!("".ends_with(""));
|
|
|
|
|
assert!("abc".ends_with(""));
|
|
|
|
|
assert!("abc".ends_with("c"));
|
|
|
|
|
assert!(!"a".ends_with("abc"));
|
|
|
|
|
assert!(!"".ends_with("abc"));
|
|
|
|
|
assert!(!"ddö".ends_with("-"));
|
|
|
|
|
assert!("ddö".ends_with("dö"));
|
2015-03-11 04:58:16 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
|
fn test_is_empty() {
|
|
|
|
|
assert!("".is_empty());
|
|
|
|
|
assert!(!"a".is_empty());
|
|
|
|
|
}
|
|
|
|
|
|
2016-09-08 10:55:04 +00:00
|
|
|
|
#[test]
|
|
|
|
|
fn test_replacen() {
|
|
|
|
|
assert_eq!("".replacen('a', "b", 5), "");
|
|
|
|
|
assert_eq!("acaaa".replacen("a", "b", 3), "bcbba");
|
|
|
|
|
assert_eq!("aaaa".replacen("a", "b", 0), "aaaa");
|
|
|
|
|
|
|
|
|
|
let test = "test";
|
|
|
|
|
assert_eq!(" test test ".replacen(test, "toast", 3), " toast toast ");
|
|
|
|
|
assert_eq!(" test test ".replacen(test, "toast", 0), " test test ");
|
|
|
|
|
assert_eq!(" test test ".replacen(test, "", 5), " ");
|
|
|
|
|
|
|
|
|
|
assert_eq!("qwer123zxc789".replacen(char::is_numeric, "", 3), "qwerzxc789");
|
|
|
|
|
}
|
|
|
|
|
|
2015-03-11 04:58:16 +00:00
|
|
|
|
#[test]
|
|
|
|
|
fn test_replace() {
|
|
|
|
|
let a = "a";
|
2015-10-22 02:14:11 +00:00
|
|
|
|
assert_eq!("".replace(a, "b"), "");
|
|
|
|
|
assert_eq!("a".replace(a, "b"), "b");
|
|
|
|
|
assert_eq!("ab".replace(a, "b"), "bb");
|
2015-03-11 04:58:16 +00:00
|
|
|
|
let test = "test";
|
2015-10-22 02:14:11 +00:00
|
|
|
|
assert_eq!(" test test ".replace(test, "toast"), " toast toast ");
|
|
|
|
|
assert_eq!(" test test ".replace(test, ""), " ");
|
2015-03-11 04:58:16 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
|
fn test_replace_2a() {
|
|
|
|
|
let data = "ประเทศไทย中华";
|
|
|
|
|
let repl = "دولة الكويت";
|
|
|
|
|
|
|
|
|
|
let a = "ประเ";
|
|
|
|
|
let a2 = "دولة الكويتทศไทย中华";
|
|
|
|
|
assert_eq!(data.replace(a, repl), a2);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
|
fn test_replace_2b() {
|
|
|
|
|
let data = "ประเทศไทย中华";
|
|
|
|
|
let repl = "دولة الكويت";
|
|
|
|
|
|
|
|
|
|
let b = "ะเ";
|
|
|
|
|
let b2 = "ปรدولة الكويتทศไทย中华";
|
|
|
|
|
assert_eq!(data.replace(b, repl), b2);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
|
fn test_replace_2c() {
|
|
|
|
|
let data = "ประเทศไทย中华";
|
|
|
|
|
let repl = "دولة الكويت";
|
|
|
|
|
|
|
|
|
|
let c = "中华";
|
|
|
|
|
let c2 = "ประเทศไทยدولة الكويت";
|
|
|
|
|
assert_eq!(data.replace(c, repl), c2);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
|
fn test_replace_2d() {
|
|
|
|
|
let data = "ประเทศไทย中华";
|
|
|
|
|
let repl = "دولة الكويت";
|
|
|
|
|
|
|
|
|
|
let d = "ไท华";
|
|
|
|
|
assert_eq!(data.replace(d, repl), data);
|
|
|
|
|
}
|
|
|
|
|
|
2015-11-01 04:21:47 +00:00
|
|
|
|
#[test]
|
|
|
|
|
fn test_replace_pattern() {
|
|
|
|
|
let data = "abcdαβγδabcdαβγδ";
|
|
|
|
|
assert_eq!(data.replace("dαβ", "😺😺😺"), "abc😺😺😺γδabc😺😺😺γδ");
|
|
|
|
|
assert_eq!(data.replace('γ', "😺😺😺"), "abcdαβ😺😺😺δabcdαβ😺😺😺δ");
|
|
|
|
|
assert_eq!(data.replace(&['a', 'γ'] as &[_], "😺😺😺"), "😺😺😺bcdαβ😺😺😺δ😺😺😺bcdαβ😺😺😺δ");
|
|
|
|
|
assert_eq!(data.replace(|c| c == 'γ', "😺😺😺"), "abcdαβ😺😺😺δabcdαβ😺😺😺δ");
|
|
|
|
|
}
|
|
|
|
|
|
2018-04-30 11:37:36 +00:00
|
|
|
|
// The current implementation of SliceIndex fails to handle methods
|
|
|
|
|
// orthogonally from range types; therefore, it is worth testing
|
|
|
|
|
// all of the indexing operations on each input.
|
2018-04-30 11:37:02 +00:00
|
|
|
|
mod slice_index {
|
2018-04-30 11:37:08 +00:00
|
|
|
|
// Test a slicing operation **that should succeed,**
|
|
|
|
|
// testing it on all of the indexing methods.
|
|
|
|
|
//
|
2018-04-30 11:37:36 +00:00
|
|
|
|
// This is not suitable for testing failure on invalid inputs.
|
2018-04-30 11:37:08 +00:00
|
|
|
|
macro_rules! assert_range_eq {
|
|
|
|
|
($s:expr, $range:expr, $expected:expr) => {
|
|
|
|
|
let mut s: String = $s.to_owned();
|
|
|
|
|
let mut expected: String = $expected.to_owned();
|
|
|
|
|
{
|
|
|
|
|
let s: &str = &s;
|
|
|
|
|
let expected: &str = &expected;
|
|
|
|
|
|
|
|
|
|
assert_eq!(&s[$range], expected, "(in assertion for: index)");
|
|
|
|
|
assert_eq!(s.get($range), Some(expected), "(in assertion for: get)");
|
|
|
|
|
unsafe {
|
|
|
|
|
assert_eq!(
|
|
|
|
|
s.get_unchecked($range),
|
|
|
|
|
expected,
|
|
|
|
|
"(in assertion for: get_unchecked)",
|
|
|
|
|
);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
{
|
|
|
|
|
let s: &mut str = &mut s;
|
|
|
|
|
let expected: &mut str = &mut expected;
|
|
|
|
|
|
|
|
|
|
assert_eq!(&mut s[$range], expected, "(in assertion for: index_mut)",);
|
|
|
|
|
assert_eq!(
|
|
|
|
|
s.get_mut($range),
|
|
|
|
|
Some(&mut expected[..]),
|
|
|
|
|
"(in assertion for: get_mut)",
|
|
|
|
|
);
|
|
|
|
|
unsafe {
|
|
|
|
|
assert_eq!(
|
|
|
|
|
s.get_unchecked_mut($range),
|
|
|
|
|
expected,
|
|
|
|
|
"(in assertion for: get_unchecked_mut)",
|
|
|
|
|
);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
};
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Make sure the macro can actually detect bugs,
|
|
|
|
|
// because if it can't, then what are we even doing here?
|
|
|
|
|
//
|
|
|
|
|
// (Be aware this only demonstrates the ability to detect bugs
|
2018-04-30 11:37:36 +00:00
|
|
|
|
// in the FIRST method that panics, as the macro is not designed
|
2018-04-30 11:37:08 +00:00
|
|
|
|
// to be used in `should_panic`)
|
2018-04-30 11:37:02 +00:00
|
|
|
|
#[test]
|
2018-04-30 11:37:08 +00:00
|
|
|
|
#[should_panic(expected = "out of bounds")]
|
|
|
|
|
fn assert_range_eq_can_fail_by_panic() {
|
|
|
|
|
assert_range_eq!("abc", 0..5, "abc");
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// (Be aware this only demonstrates the ability to detect bugs
|
|
|
|
|
// in the FIRST method it calls, as the macro is not designed
|
|
|
|
|
// to be used in `should_panic`)
|
|
|
|
|
#[test]
|
|
|
|
|
#[should_panic(expected = "==")]
|
|
|
|
|
fn assert_range_eq_can_fail_by_inequality() {
|
|
|
|
|
assert_range_eq!("abc", 0..2, "abc");
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Generates test cases for bad index operations.
|
|
|
|
|
//
|
|
|
|
|
// This generates `should_panic` test cases for Index/IndexMut
|
|
|
|
|
// and `None` test cases for get/get_mut.
|
|
|
|
|
macro_rules! panic_cases {
|
|
|
|
|
($(
|
2018-04-30 11:37:36 +00:00
|
|
|
|
in mod $case_name:ident {
|
|
|
|
|
data: $data:expr;
|
2018-04-30 11:37:08 +00:00
|
|
|
|
|
|
|
|
|
// optional:
|
|
|
|
|
//
|
|
|
|
|
// a similar input for which DATA[input] succeeds, and the corresponding
|
|
|
|
|
// output str. This helps validate "critical points" where an input range
|
|
|
|
|
// straddles the boundary between valid and invalid.
|
|
|
|
|
// (such as the input `len..len`, which is just barely valid)
|
|
|
|
|
$(
|
2018-04-30 11:37:36 +00:00
|
|
|
|
good: data[$good:expr] == $output:expr;
|
2018-04-30 11:37:08 +00:00
|
|
|
|
)*
|
|
|
|
|
|
2018-04-30 11:37:36 +00:00
|
|
|
|
bad: data[$bad:expr];
|
|
|
|
|
message: $expect_msg:expr; // must be a literal
|
2018-04-30 11:37:08 +00:00
|
|
|
|
}
|
|
|
|
|
)*) => {$(
|
|
|
|
|
mod $case_name {
|
|
|
|
|
#[test]
|
|
|
|
|
fn pass() {
|
|
|
|
|
let mut v: String = $data.into();
|
|
|
|
|
|
|
|
|
|
$( assert_range_eq!(v, $good, $output); )*
|
|
|
|
|
|
|
|
|
|
{
|
|
|
|
|
let v: &str = &v;
|
|
|
|
|
assert_eq!(v.get($bad), None, "(in None assertion for get)");
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
{
|
|
|
|
|
let v: &mut str = &mut v;
|
|
|
|
|
assert_eq!(v.get_mut($bad), None, "(in None assertion for get_mut)");
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
|
#[should_panic(expected = $expect_msg)]
|
|
|
|
|
fn index_fail() {
|
|
|
|
|
let v: String = $data.into();
|
|
|
|
|
let v: &str = &v;
|
|
|
|
|
let _v = &v[$bad];
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
|
#[should_panic(expected = $expect_msg)]
|
|
|
|
|
fn index_mut_fail() {
|
|
|
|
|
let mut v: String = $data.into();
|
|
|
|
|
let v: &mut str = &mut v;
|
|
|
|
|
let _v = &mut v[$bad];
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
)*};
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
|
fn simple_ascii() {
|
|
|
|
|
assert_range_eq!("abc", .., "abc");
|
|
|
|
|
|
|
|
|
|
assert_range_eq!("abc", 0..2, "ab");
|
|
|
|
|
assert_range_eq!("abc", 0..=1, "ab");
|
|
|
|
|
assert_range_eq!("abc", ..2, "ab");
|
|
|
|
|
assert_range_eq!("abc", ..=1, "ab");
|
|
|
|
|
|
|
|
|
|
assert_range_eq!("abc", 1..3, "bc");
|
|
|
|
|
assert_range_eq!("abc", 1..=2, "bc");
|
|
|
|
|
assert_range_eq!("abc", 1..1, "");
|
|
|
|
|
assert_range_eq!("abc", 1..=0, "");
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
|
fn simple_unicode() {
|
|
|
|
|
// 日本
|
|
|
|
|
assert_range_eq!("\u{65e5}\u{672c}", .., "\u{65e5}\u{672c}");
|
|
|
|
|
|
|
|
|
|
assert_range_eq!("\u{65e5}\u{672c}", 0..3, "\u{65e5}");
|
|
|
|
|
assert_range_eq!("\u{65e5}\u{672c}", 0..=2, "\u{65e5}");
|
|
|
|
|
assert_range_eq!("\u{65e5}\u{672c}", ..3, "\u{65e5}");
|
|
|
|
|
assert_range_eq!("\u{65e5}\u{672c}", ..=2, "\u{65e5}");
|
|
|
|
|
|
|
|
|
|
assert_range_eq!("\u{65e5}\u{672c}", 3..6, "\u{672c}");
|
|
|
|
|
assert_range_eq!("\u{65e5}\u{672c}", 3..=5, "\u{672c}");
|
|
|
|
|
assert_range_eq!("\u{65e5}\u{672c}", 3.., "\u{672c}");
|
2018-04-30 11:37:02 +00:00
|
|
|
|
|
|
|
|
|
let data = "ประเทศไทย中华";
|
2018-04-30 11:37:08 +00:00
|
|
|
|
assert_range_eq!(data, 0..3, "ป");
|
|
|
|
|
assert_range_eq!(data, 3..6, "ร");
|
|
|
|
|
assert_range_eq!(data, 3..3, "");
|
|
|
|
|
assert_range_eq!(data, 30..33, "华");
|
|
|
|
|
|
|
|
|
|
/*0: 中
|
|
|
|
|
3: 华
|
|
|
|
|
6: V
|
|
|
|
|
7: i
|
|
|
|
|
8: ệ
|
|
|
|
|
11: t
|
|
|
|
|
12:
|
|
|
|
|
13: N
|
|
|
|
|
14: a
|
|
|
|
|
15: m */
|
|
|
|
|
let ss = "中华Việt Nam";
|
|
|
|
|
assert_range_eq!(ss, 3..6, "华");
|
|
|
|
|
assert_range_eq!(ss, 6..16, "Việt Nam");
|
|
|
|
|
assert_range_eq!(ss, 6..=15, "Việt Nam");
|
|
|
|
|
assert_range_eq!(ss, 6.., "Việt Nam");
|
|
|
|
|
|
|
|
|
|
assert_range_eq!(ss, 0..3, "中");
|
|
|
|
|
assert_range_eq!(ss, 3..7, "华V");
|
|
|
|
|
assert_range_eq!(ss, 3..=6, "华V");
|
|
|
|
|
assert_range_eq!(ss, 3..3, "");
|
|
|
|
|
assert_range_eq!(ss, 3..=2, "");
|
|
|
|
|
}
|
2018-04-30 11:37:02 +00:00
|
|
|
|
|
2018-04-30 11:37:08 +00:00
|
|
|
|
#[test]
|
2019-12-07 11:42:19 +00:00
|
|
|
|
#[cfg_attr(target_os = "emscripten", ignore)] // hits an OOM
|
|
|
|
|
#[cfg_attr(miri, ignore)] // Miri is too slow
|
2018-04-30 11:37:08 +00:00
|
|
|
|
fn simple_big() {
|
2018-04-30 11:37:02 +00:00
|
|
|
|
fn a_million_letter_x() -> String {
|
|
|
|
|
let mut i = 0;
|
|
|
|
|
let mut rs = String::new();
|
|
|
|
|
while i < 100000 {
|
|
|
|
|
rs.push_str("华华华华华华华华华华");
|
|
|
|
|
i += 1;
|
|
|
|
|
}
|
|
|
|
|
rs
|
|
|
|
|
}
|
|
|
|
|
fn half_a_million_letter_x() -> String {
|
|
|
|
|
let mut i = 0;
|
|
|
|
|
let mut rs = String::new();
|
|
|
|
|
while i < 100000 {
|
|
|
|
|
rs.push_str("华华华华华");
|
|
|
|
|
i += 1;
|
|
|
|
|
}
|
|
|
|
|
rs
|
|
|
|
|
}
|
|
|
|
|
let letters = a_million_letter_x();
|
2018-04-30 11:37:08 +00:00
|
|
|
|
assert_range_eq!(letters, 0..3 * 500000, half_a_million_letter_x());
|
2018-04-30 11:37:02 +00:00
|
|
|
|
}
|
2015-03-11 04:58:16 +00:00
|
|
|
|
|
2018-04-30 11:37:02 +00:00
|
|
|
|
#[test]
|
|
|
|
|
#[should_panic]
|
|
|
|
|
fn test_slice_fail() {
|
2021-06-18 07:09:40 +00:00
|
|
|
|
let _ = &"中华Việt Nam"[0..2];
|
2015-03-11 04:58:16 +00:00
|
|
|
|
}
|
2018-04-30 11:37:02 +00:00
|
|
|
|
|
2018-04-30 11:37:08 +00:00
|
|
|
|
panic_cases! {
|
2018-04-30 11:37:36 +00:00
|
|
|
|
in mod rangefrom_len {
|
|
|
|
|
data: "abcdef";
|
|
|
|
|
good: data[6..] == "";
|
|
|
|
|
bad: data[7..];
|
|
|
|
|
message: "out of bounds";
|
2018-04-30 11:37:08 +00:00
|
|
|
|
}
|
|
|
|
|
|
2018-04-30 11:37:36 +00:00
|
|
|
|
in mod rangeto_len {
|
|
|
|
|
data: "abcdef";
|
|
|
|
|
good: data[..6] == "abcdef";
|
|
|
|
|
bad: data[..7];
|
|
|
|
|
message: "out of bounds";
|
2018-04-30 11:37:08 +00:00
|
|
|
|
}
|
|
|
|
|
|
2018-04-30 11:37:36 +00:00
|
|
|
|
in mod rangetoinclusive_len {
|
|
|
|
|
data: "abcdef";
|
|
|
|
|
good: data[..=5] == "abcdef";
|
|
|
|
|
bad: data[..=6];
|
|
|
|
|
message: "out of bounds";
|
2018-04-30 11:37:08 +00:00
|
|
|
|
}
|
|
|
|
|
|
2020-10-21 00:18:08 +00:00
|
|
|
|
in mod rangeinclusive_len {
|
|
|
|
|
data: "abcdef";
|
|
|
|
|
good: data[0..=5] == "abcdef";
|
|
|
|
|
bad: data[0..=6];
|
|
|
|
|
message: "out of bounds";
|
|
|
|
|
}
|
|
|
|
|
|
2018-04-30 11:37:36 +00:00
|
|
|
|
in mod range_len_len {
|
|
|
|
|
data: "abcdef";
|
|
|
|
|
good: data[6..6] == "";
|
|
|
|
|
bad: data[7..7];
|
|
|
|
|
message: "out of bounds";
|
2018-04-30 11:37:08 +00:00
|
|
|
|
}
|
|
|
|
|
|
2018-04-30 11:37:36 +00:00
|
|
|
|
in mod rangeinclusive_len_len {
|
|
|
|
|
data: "abcdef";
|
|
|
|
|
good: data[6..=5] == "";
|
|
|
|
|
bad: data[7..=6];
|
|
|
|
|
message: "out of bounds";
|
2018-04-30 11:37:08 +00:00
|
|
|
|
}
|
2018-04-30 11:37:02 +00:00
|
|
|
|
}
|
2015-03-11 04:58:16 +00:00
|
|
|
|
|
2020-10-21 00:18:08 +00:00
|
|
|
|
panic_cases! {
|
|
|
|
|
in mod rangeinclusive_exhausted {
|
|
|
|
|
data: "abcdef";
|
|
|
|
|
|
|
|
|
|
good: data[0..=5] == "abcdef";
|
|
|
|
|
good: data[{
|
|
|
|
|
let mut iter = 0..=5;
|
|
|
|
|
iter.by_ref().count(); // exhaust it
|
|
|
|
|
iter
|
|
|
|
|
}] == "";
|
|
|
|
|
|
|
|
|
|
// 0..=6 is out of bounds before exhaustion, so it
|
|
|
|
|
// stands to reason that it still would be after.
|
|
|
|
|
bad: data[{
|
|
|
|
|
let mut iter = 0..=6;
|
|
|
|
|
iter.by_ref().count(); // exhaust it
|
|
|
|
|
iter
|
|
|
|
|
}];
|
|
|
|
|
message: "out of bounds";
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2018-04-30 11:37:08 +00:00
|
|
|
|
panic_cases! {
|
2018-04-30 11:37:36 +00:00
|
|
|
|
in mod range_neg_width {
|
|
|
|
|
data: "abcdef";
|
|
|
|
|
good: data[4..4] == "";
|
|
|
|
|
bad: data[4..3];
|
|
|
|
|
message: "begin <= end (4 <= 3)";
|
2018-04-30 11:37:08 +00:00
|
|
|
|
}
|
|
|
|
|
|
2018-04-30 11:37:36 +00:00
|
|
|
|
in mod rangeinclusive_neg_width {
|
|
|
|
|
data: "abcdef";
|
|
|
|
|
good: data[4..=3] == "";
|
|
|
|
|
bad: data[4..=2];
|
|
|
|
|
message: "begin <= end (4 <= 3)";
|
2018-04-30 11:37:08 +00:00
|
|
|
|
}
|
2018-04-30 11:37:02 +00:00
|
|
|
|
}
|
2017-06-04 18:08:25 +00:00
|
|
|
|
|
2018-04-30 11:37:08 +00:00
|
|
|
|
mod overflow {
|
|
|
|
|
panic_cases! {
|
2018-04-30 11:37:36 +00:00
|
|
|
|
in mod rangeinclusive {
|
|
|
|
|
data: "hello";
|
2018-04-30 11:37:19 +00:00
|
|
|
|
// note: using 0 specifically ensures that the result of overflowing is 0..0,
|
|
|
|
|
// so that `get` doesn't simply return None for the wrong reason.
|
2020-06-02 07:59:11 +00:00
|
|
|
|
bad: data[0..=usize::MAX];
|
2018-04-30 11:37:36 +00:00
|
|
|
|
message: "maximum usize";
|
2018-04-30 11:37:08 +00:00
|
|
|
|
}
|
|
|
|
|
|
2018-04-30 11:37:36 +00:00
|
|
|
|
in mod rangetoinclusive {
|
|
|
|
|
data: "hello";
|
2020-06-02 07:59:11 +00:00
|
|
|
|
bad: data[..=usize::MAX];
|
2018-04-30 11:37:36 +00:00
|
|
|
|
message: "maximum usize";
|
2018-04-30 11:37:08 +00:00
|
|
|
|
}
|
2018-04-30 11:37:02 +00:00
|
|
|
|
}
|
2018-04-30 11:37:08 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
mod boundary {
|
2019-02-02 11:27:41 +00:00
|
|
|
|
const DATA: &str = "abcαβγ";
|
2018-04-30 11:37:08 +00:00
|
|
|
|
|
|
|
|
|
const BAD_START: usize = 4;
|
|
|
|
|
const GOOD_START: usize = 3;
|
|
|
|
|
const BAD_END: usize = 6;
|
|
|
|
|
const GOOD_END: usize = 7;
|
|
|
|
|
const BAD_END_INCL: usize = BAD_END - 1;
|
|
|
|
|
const GOOD_END_INCL: usize = GOOD_END - 1;
|
|
|
|
|
|
|
|
|
|
// it is especially important to test all of the different range types here
|
|
|
|
|
// because some of the logic may be duplicated as part of micro-optimizations
|
|
|
|
|
// to dodge unicode boundary checks on half-ranges.
|
|
|
|
|
panic_cases! {
|
2018-04-30 11:37:36 +00:00
|
|
|
|
in mod range_1 {
|
|
|
|
|
data: super::DATA;
|
|
|
|
|
bad: data[super::BAD_START..super::GOOD_END];
|
|
|
|
|
message:
|
2018-04-30 11:37:08 +00:00
|
|
|
|
"byte index 4 is not a char boundary; it is inside 'α' (bytes 3..5) of";
|
|
|
|
|
}
|
|
|
|
|
|
2018-04-30 11:37:36 +00:00
|
|
|
|
in mod range_2 {
|
|
|
|
|
data: super::DATA;
|
|
|
|
|
bad: data[super::GOOD_START..super::BAD_END];
|
|
|
|
|
message:
|
2018-04-30 11:37:08 +00:00
|
|
|
|
"byte index 6 is not a char boundary; it is inside 'β' (bytes 5..7) of";
|
|
|
|
|
}
|
|
|
|
|
|
2018-04-30 11:37:36 +00:00
|
|
|
|
in mod rangefrom {
|
|
|
|
|
data: super::DATA;
|
|
|
|
|
bad: data[super::BAD_START..];
|
|
|
|
|
message:
|
2018-04-30 11:37:08 +00:00
|
|
|
|
"byte index 4 is not a char boundary; it is inside 'α' (bytes 3..5) of";
|
|
|
|
|
}
|
|
|
|
|
|
2018-04-30 11:37:36 +00:00
|
|
|
|
in mod rangeto {
|
|
|
|
|
data: super::DATA;
|
|
|
|
|
bad: data[..super::BAD_END];
|
|
|
|
|
message:
|
2018-04-30 11:37:08 +00:00
|
|
|
|
"byte index 6 is not a char boundary; it is inside 'β' (bytes 5..7) of";
|
|
|
|
|
}
|
|
|
|
|
|
2018-04-30 11:37:36 +00:00
|
|
|
|
in mod rangeinclusive_1 {
|
|
|
|
|
data: super::DATA;
|
|
|
|
|
bad: data[super::BAD_START..=super::GOOD_END_INCL];
|
|
|
|
|
message:
|
2018-04-30 11:37:08 +00:00
|
|
|
|
"byte index 4 is not a char boundary; it is inside 'α' (bytes 3..5) of";
|
|
|
|
|
}
|
|
|
|
|
|
2018-04-30 11:37:36 +00:00
|
|
|
|
in mod rangeinclusive_2 {
|
|
|
|
|
data: super::DATA;
|
|
|
|
|
bad: data[super::GOOD_START..=super::BAD_END_INCL];
|
|
|
|
|
message:
|
2018-04-30 11:37:08 +00:00
|
|
|
|
"byte index 6 is not a char boundary; it is inside 'β' (bytes 5..7) of";
|
|
|
|
|
}
|
|
|
|
|
|
2018-04-30 11:37:36 +00:00
|
|
|
|
in mod rangetoinclusive {
|
|
|
|
|
data: super::DATA;
|
|
|
|
|
bad: data[..=super::BAD_END_INCL];
|
|
|
|
|
message:
|
2018-04-30 11:37:08 +00:00
|
|
|
|
"byte index 6 is not a char boundary; it is inside 'β' (bytes 5..7) of";
|
|
|
|
|
}
|
2018-04-30 11:37:02 +00:00
|
|
|
|
}
|
|
|
|
|
}
|
2017-06-04 18:08:25 +00:00
|
|
|
|
|
2019-02-02 11:27:41 +00:00
|
|
|
|
const LOREM_PARAGRAPH: &str = "\
|
2018-04-30 11:37:08 +00:00
|
|
|
|
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Suspendisse quis lorem \
|
|
|
|
|
sit amet dolor ultricies condimentum. Praesent iaculis purus elit, ac malesuada \
|
|
|
|
|
quam malesuada in. Duis sed orci eros. Suspendisse sit amet magna mollis, mollis \
|
|
|
|
|
nunc luctus, imperdiet mi. Integer fringilla non sem ut lacinia. Fusce varius \
|
|
|
|
|
tortor a risus porttitor hendrerit. Morbi mauris dui, ultricies nec tempus vel, \
|
|
|
|
|
gravida nec quam.";
|
2018-04-30 11:37:02 +00:00
|
|
|
|
|
|
|
|
|
// check the panic includes the prefix of the sliced string
|
|
|
|
|
#[test]
|
|
|
|
|
#[should_panic(expected = "byte index 1024 is out of bounds of `Lorem ipsum dolor sit amet")]
|
|
|
|
|
fn test_slice_fail_truncated_1() {
|
2021-06-18 07:09:40 +00:00
|
|
|
|
let _ = &LOREM_PARAGRAPH[..1024];
|
2018-04-30 11:37:02 +00:00
|
|
|
|
}
|
|
|
|
|
// check the truncation in the panic message
|
|
|
|
|
#[test]
|
|
|
|
|
#[should_panic(expected = "luctus, im`[...]")]
|
|
|
|
|
fn test_slice_fail_truncated_2() {
|
2021-06-18 07:09:40 +00:00
|
|
|
|
let _ = &LOREM_PARAGRAPH[..1024];
|
2018-04-30 11:37:02 +00:00
|
|
|
|
}
|
2017-06-04 18:08:25 +00:00
|
|
|
|
}
|
2016-04-10 18:09:26 +00:00
|
|
|
|
|
2018-04-18 20:48:34 +00:00
|
|
|
|
#[test]
|
|
|
|
|
fn test_str_slice_rangetoinclusive_ok() {
|
|
|
|
|
let s = "abcαβγ";
|
|
|
|
|
assert_eq!(&s[..=2], "abc");
|
|
|
|
|
assert_eq!(&s[..=4], "abcα");
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
|
#[should_panic]
|
|
|
|
|
fn test_str_slice_rangetoinclusive_notok() {
|
|
|
|
|
let s = "abcαβγ";
|
2021-06-18 07:09:40 +00:00
|
|
|
|
let _ = &s[..=3];
|
2018-04-18 20:48:34 +00:00
|
|
|
|
}
|
|
|
|
|
|
2018-04-16 20:34:09 +00:00
|
|
|
|
#[test]
|
|
|
|
|
fn test_str_slicemut_rangetoinclusive_ok() {
|
|
|
|
|
let mut s = "abcαβγ".to_owned();
|
|
|
|
|
let s: &mut str = &mut s;
|
2018-04-18 20:48:34 +00:00
|
|
|
|
assert_eq!(&mut s[..=2], "abc");
|
|
|
|
|
assert_eq!(&mut s[..=4], "abcα");
|
2018-04-16 20:34:09 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
|
#[should_panic]
|
|
|
|
|
fn test_str_slicemut_rangetoinclusive_notok() {
|
|
|
|
|
let mut s = "abcαβγ".to_owned();
|
|
|
|
|
let s: &mut str = &mut s;
|
2021-06-18 07:09:40 +00:00
|
|
|
|
let _ = &mut s[..=3];
|
2018-04-16 20:34:09 +00:00
|
|
|
|
}
|
|
|
|
|
|
2016-04-10 18:09:26 +00:00
|
|
|
|
#[test]
|
|
|
|
|
fn test_is_char_boundary() {
|
|
|
|
|
let s = "ศไทย中华Việt Nam β-release 🐱123";
|
|
|
|
|
assert!(s.is_char_boundary(0));
|
|
|
|
|
assert!(s.is_char_boundary(s.len()));
|
|
|
|
|
assert!(!s.is_char_boundary(s.len() + 1));
|
|
|
|
|
for (i, ch) in s.char_indices() {
|
|
|
|
|
// ensure character locations are boundaries and continuation bytes are not
|
|
|
|
|
assert!(s.is_char_boundary(i), "{} is a char boundary in {:?}", i, s);
|
|
|
|
|
for j in 1..ch.len_utf8() {
|
|
|
|
|
assert!(
|
|
|
|
|
!s.is_char_boundary(i + j),
|
|
|
|
|
"{} should not be a char boundary in {:?}",
|
|
|
|
|
i + j,
|
|
|
|
|
s
|
|
|
|
|
);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2015-03-11 04:58:16 +00:00
|
|
|
|
|
2018-06-21 13:04:53 +00:00
|
|
|
|
#[test]
|
|
|
|
|
fn test_trim_start_matches() {
|
|
|
|
|
let v: &[char] = &[];
|
|
|
|
|
assert_eq!(" *** foo *** ".trim_start_matches(v), " *** foo *** ");
|
|
|
|
|
let chars: &[char] = &['*', ' '];
|
|
|
|
|
assert_eq!(" *** foo *** ".trim_start_matches(chars), "foo *** ");
|
|
|
|
|
assert_eq!(" *** *** ".trim_start_matches(chars), "");
|
|
|
|
|
assert_eq!("foo *** ".trim_start_matches(chars), "foo *** ");
|
|
|
|
|
|
|
|
|
|
assert_eq!("11foo1bar11".trim_start_matches('1'), "foo1bar11");
|
|
|
|
|
let chars: &[char] = &['1', '2'];
|
|
|
|
|
assert_eq!("12foo1bar12".trim_start_matches(chars), "foo1bar12");
|
|
|
|
|
assert_eq!("123foo1bar123".trim_start_matches(|c: char| c.is_numeric()), "foo1bar123");
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
|
fn test_trim_end_matches() {
|
|
|
|
|
let v: &[char] = &[];
|
|
|
|
|
assert_eq!(" *** foo *** ".trim_end_matches(v), " *** foo *** ");
|
|
|
|
|
let chars: &[char] = &['*', ' '];
|
|
|
|
|
assert_eq!(" *** foo *** ".trim_end_matches(chars), " *** foo");
|
|
|
|
|
assert_eq!(" *** *** ".trim_end_matches(chars), "");
|
|
|
|
|
assert_eq!(" *** foo".trim_end_matches(chars), " *** foo");
|
|
|
|
|
|
|
|
|
|
assert_eq!("11foo1bar11".trim_end_matches('1'), "11foo1bar");
|
|
|
|
|
let chars: &[char] = &['1', '2'];
|
|
|
|
|
assert_eq!("12foo1bar12".trim_end_matches(chars), "12foo1bar");
|
|
|
|
|
assert_eq!("123foo1bar123".trim_end_matches(|c: char| c.is_numeric()), "123foo1bar");
|
|
|
|
|
}
|
|
|
|
|
|
2015-03-11 04:58:16 +00:00
|
|
|
|
#[test]
|
|
|
|
|
fn test_trim_matches() {
|
|
|
|
|
let v: &[char] = &[];
|
|
|
|
|
assert_eq!(" *** foo *** ".trim_matches(v), " *** foo *** ");
|
|
|
|
|
let chars: &[char] = &['*', ' '];
|
|
|
|
|
assert_eq!(" *** foo *** ".trim_matches(chars), "foo");
|
|
|
|
|
assert_eq!(" *** *** ".trim_matches(chars), "");
|
|
|
|
|
assert_eq!("foo".trim_matches(chars), "foo");
|
|
|
|
|
|
|
|
|
|
assert_eq!("11foo1bar11".trim_matches('1'), "foo1bar");
|
|
|
|
|
let chars: &[char] = &['1', '2'];
|
|
|
|
|
assert_eq!("12foo1bar12".trim_matches(chars), "foo1bar");
|
|
|
|
|
assert_eq!("123foo1bar123".trim_matches(|c: char| c.is_numeric()), "foo1bar");
|
|
|
|
|
}
|
|
|
|
|
|
2018-06-21 13:04:53 +00:00
|
|
|
|
#[test]
|
|
|
|
|
fn test_trim_start() {
|
|
|
|
|
assert_eq!("".trim_start(), "");
|
|
|
|
|
assert_eq!("a".trim_start(), "a");
|
|
|
|
|
assert_eq!(" ".trim_start(), "");
|
|
|
|
|
assert_eq!(" blah".trim_start(), "blah");
|
|
|
|
|
assert_eq!(" \u{3000} wut".trim_start(), "wut");
|
|
|
|
|
assert_eq!("hey ".trim_start(), "hey ");
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
|
fn test_trim_end() {
|
|
|
|
|
assert_eq!("".trim_end(), "");
|
|
|
|
|
assert_eq!("a".trim_end(), "a");
|
|
|
|
|
assert_eq!(" ".trim_end(), "");
|
|
|
|
|
assert_eq!("blah ".trim_end(), "blah");
|
|
|
|
|
assert_eq!("wut \u{3000} ".trim_end(), "wut");
|
|
|
|
|
assert_eq!(" hey".trim_end(), " hey");
|
|
|
|
|
}
|
|
|
|
|
|
2015-03-11 04:58:16 +00:00
|
|
|
|
#[test]
|
|
|
|
|
fn test_trim() {
|
|
|
|
|
assert_eq!("".trim(), "");
|
|
|
|
|
assert_eq!("a".trim(), "a");
|
|
|
|
|
assert_eq!(" ".trim(), "");
|
|
|
|
|
assert_eq!(" blah ".trim(), "blah");
|
|
|
|
|
assert_eq!("\nwut \u{3000} ".trim(), "wut");
|
|
|
|
|
assert_eq!(" hey dude ".trim(), "hey dude");
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
|
fn test_is_whitespace() {
|
|
|
|
|
assert!("".chars().all(|c| c.is_whitespace()));
|
|
|
|
|
assert!(" ".chars().all(|c| c.is_whitespace()));
|
|
|
|
|
assert!("\u{2009}".chars().all(|c| c.is_whitespace())); // Thin space
|
|
|
|
|
assert!(" \n\t ".chars().all(|c| c.is_whitespace()));
|
|
|
|
|
assert!(!" _ ".chars().all(|c| c.is_whitespace()));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
|
fn test_is_utf8() {
|
|
|
|
|
// deny overlong encodings
|
|
|
|
|
assert!(from_utf8(&[0xc0, 0x80]).is_err());
|
|
|
|
|
assert!(from_utf8(&[0xc0, 0xae]).is_err());
|
|
|
|
|
assert!(from_utf8(&[0xe0, 0x80, 0x80]).is_err());
|
|
|
|
|
assert!(from_utf8(&[0xe0, 0x80, 0xaf]).is_err());
|
|
|
|
|
assert!(from_utf8(&[0xe0, 0x81, 0x81]).is_err());
|
|
|
|
|
assert!(from_utf8(&[0xf0, 0x82, 0x82, 0xac]).is_err());
|
|
|
|
|
assert!(from_utf8(&[0xf4, 0x90, 0x80, 0x80]).is_err());
|
|
|
|
|
|
|
|
|
|
// deny surrogates
|
|
|
|
|
assert!(from_utf8(&[0xED, 0xA0, 0x80]).is_err());
|
|
|
|
|
assert!(from_utf8(&[0xED, 0xBF, 0xBF]).is_err());
|
|
|
|
|
|
|
|
|
|
assert!(from_utf8(&[0xC2, 0x80]).is_ok());
|
|
|
|
|
assert!(from_utf8(&[0xDF, 0xBF]).is_ok());
|
|
|
|
|
assert!(from_utf8(&[0xE0, 0xA0, 0x80]).is_ok());
|
|
|
|
|
assert!(from_utf8(&[0xED, 0x9F, 0xBF]).is_ok());
|
|
|
|
|
assert!(from_utf8(&[0xEE, 0x80, 0x80]).is_ok());
|
|
|
|
|
assert!(from_utf8(&[0xEF, 0xBF, 0xBF]).is_ok());
|
|
|
|
|
assert!(from_utf8(&[0xF0, 0x90, 0x80, 0x80]).is_ok());
|
|
|
|
|
assert!(from_utf8(&[0xF4, 0x8F, 0xBF, 0xBF]).is_ok());
|
|
|
|
|
}
|
|
|
|
|
|
2021-11-05 12:39:01 +00:00
|
|
|
|
#[test]
|
|
|
|
|
fn test_const_is_utf8() {
|
|
|
|
|
const _: () = {
|
|
|
|
|
// deny overlong encodings
|
|
|
|
|
assert!(from_utf8(&[0xc0, 0x80]).is_err());
|
|
|
|
|
assert!(from_utf8(&[0xc0, 0xae]).is_err());
|
|
|
|
|
assert!(from_utf8(&[0xe0, 0x80, 0x80]).is_err());
|
|
|
|
|
assert!(from_utf8(&[0xe0, 0x80, 0xaf]).is_err());
|
|
|
|
|
assert!(from_utf8(&[0xe0, 0x81, 0x81]).is_err());
|
|
|
|
|
assert!(from_utf8(&[0xf0, 0x82, 0x82, 0xac]).is_err());
|
|
|
|
|
assert!(from_utf8(&[0xf4, 0x90, 0x80, 0x80]).is_err());
|
|
|
|
|
|
|
|
|
|
// deny surrogates
|
|
|
|
|
assert!(from_utf8(&[0xED, 0xA0, 0x80]).is_err());
|
|
|
|
|
assert!(from_utf8(&[0xED, 0xBF, 0xBF]).is_err());
|
|
|
|
|
|
|
|
|
|
assert!(from_utf8(&[0xC2, 0x80]).is_ok());
|
|
|
|
|
assert!(from_utf8(&[0xDF, 0xBF]).is_ok());
|
|
|
|
|
assert!(from_utf8(&[0xE0, 0xA0, 0x80]).is_ok());
|
|
|
|
|
assert!(from_utf8(&[0xED, 0x9F, 0xBF]).is_ok());
|
|
|
|
|
assert!(from_utf8(&[0xEE, 0x80, 0x80]).is_ok());
|
|
|
|
|
assert!(from_utf8(&[0xEF, 0xBF, 0xBF]).is_ok());
|
|
|
|
|
assert!(from_utf8(&[0xF0, 0x90, 0x80, 0x80]).is_ok());
|
|
|
|
|
assert!(from_utf8(&[0xF4, 0x8F, 0xBF, 0xBF]).is_ok());
|
|
|
|
|
};
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
|
fn from_utf8_mostly_ascii() {
|
|
|
|
|
// deny invalid bytes embedded in long stretches of ascii
|
|
|
|
|
for i in 32..64 {
|
|
|
|
|
let mut data = [0; 128];
|
|
|
|
|
data[i] = 0xC0;
|
|
|
|
|
assert!(from_utf8(&data).is_err());
|
|
|
|
|
data[i] = 0xC2;
|
|
|
|
|
assert!(from_utf8(&data).is_err());
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2021-11-05 12:39:01 +00:00
|
|
|
|
#[test]
|
|
|
|
|
fn const_from_utf8_mostly_ascii() {
|
|
|
|
|
const _: () = {
|
|
|
|
|
// deny invalid bytes embedded in long stretches of ascii
|
|
|
|
|
let mut i = 32;
|
|
|
|
|
while i < 64 {
|
|
|
|
|
let mut data = [0; 128];
|
|
|
|
|
data[i] = 0xC0;
|
|
|
|
|
assert!(from_utf8(&data).is_err());
|
|
|
|
|
data[i] = 0xC2;
|
|
|
|
|
assert!(from_utf8(&data).is_err());
|
|
|
|
|
|
|
|
|
|
i = i + 1;
|
|
|
|
|
}
|
|
|
|
|
};
|
|
|
|
|
}
|
|
|
|
|
|
2017-03-02 16:27:57 +00:00
|
|
|
|
#[test]
|
|
|
|
|
fn from_utf8_error() {
|
|
|
|
|
macro_rules! test {
|
2021-11-05 12:39:01 +00:00
|
|
|
|
($input: expr, $expected_valid_up_to:pat, $expected_error_len:pat) => {
|
2017-03-02 16:27:57 +00:00
|
|
|
|
let error = from_utf8($input).unwrap_err();
|
2021-11-05 12:39:01 +00:00
|
|
|
|
assert_matches!(error.valid_up_to(), $expected_valid_up_to);
|
|
|
|
|
assert_matches!(error.error_len(), $expected_error_len);
|
|
|
|
|
|
|
|
|
|
const _: () = {
|
|
|
|
|
match from_utf8($input) {
|
|
|
|
|
Err(error) => {
|
|
|
|
|
let valid_up_to = error.valid_up_to();
|
|
|
|
|
let error_len = error.error_len();
|
|
|
|
|
|
|
|
|
|
assert!(matches!(valid_up_to, $expected_valid_up_to));
|
|
|
|
|
assert!(matches!(error_len, $expected_error_len));
|
|
|
|
|
}
|
|
|
|
|
Ok(_) => unreachable!(),
|
|
|
|
|
}
|
|
|
|
|
};
|
2017-03-02 16:27:57 +00:00
|
|
|
|
};
|
|
|
|
|
}
|
2017-03-06 21:06:30 +00:00
|
|
|
|
test!(b"A\xC3\xA9 \xFF ", 4, Some(1));
|
|
|
|
|
test!(b"A\xC3\xA9 \x80 ", 4, Some(1));
|
|
|
|
|
test!(b"A\xC3\xA9 \xC1 ", 4, Some(1));
|
|
|
|
|
test!(b"A\xC3\xA9 \xC1", 4, Some(1));
|
2017-03-02 16:27:57 +00:00
|
|
|
|
test!(b"A\xC3\xA9 \xC2", 4, None);
|
2017-03-06 21:06:30 +00:00
|
|
|
|
test!(b"A\xC3\xA9 \xC2 ", 4, Some(1));
|
|
|
|
|
test!(b"A\xC3\xA9 \xC2\xC0", 4, Some(1));
|
2017-03-02 16:27:57 +00:00
|
|
|
|
test!(b"A\xC3\xA9 \xE0", 4, None);
|
2017-03-06 21:06:30 +00:00
|
|
|
|
test!(b"A\xC3\xA9 \xE0\x9F", 4, Some(1));
|
2017-03-02 16:27:57 +00:00
|
|
|
|
test!(b"A\xC3\xA9 \xE0\xA0", 4, None);
|
2017-03-06 21:06:30 +00:00
|
|
|
|
test!(b"A\xC3\xA9 \xE0\xA0\xC0", 4, Some(2));
|
|
|
|
|
test!(b"A\xC3\xA9 \xE0\xA0 ", 4, Some(2));
|
|
|
|
|
test!(b"A\xC3\xA9 \xED\xA0\x80 ", 4, Some(1));
|
2017-03-02 16:27:57 +00:00
|
|
|
|
test!(b"A\xC3\xA9 \xF1", 4, None);
|
|
|
|
|
test!(b"A\xC3\xA9 \xF1\x80", 4, None);
|
|
|
|
|
test!(b"A\xC3\xA9 \xF1\x80\x80", 4, None);
|
2017-03-06 21:06:30 +00:00
|
|
|
|
test!(b"A\xC3\xA9 \xF1 ", 4, Some(1));
|
|
|
|
|
test!(b"A\xC3\xA9 \xF1\x80 ", 4, Some(2));
|
|
|
|
|
test!(b"A\xC3\xA9 \xF1\x80\x80 ", 4, Some(3));
|
2017-03-02 16:27:57 +00:00
|
|
|
|
}
|
|
|
|
|
|
2015-03-11 04:58:16 +00:00
|
|
|
|
#[test]
|
|
|
|
|
fn test_as_bytes() {
|
|
|
|
|
// no null
|
|
|
|
|
let v = [
|
|
|
|
|
224, 184, 168, 224, 185, 132, 224, 184, 151, 224, 184, 162, 228, 184, 173, 229, 141, 142,
|
|
|
|
|
86, 105, 225, 187, 135, 116, 32, 78, 97, 109,
|
|
|
|
|
];
|
|
|
|
|
let b: &[u8] = &[];
|
|
|
|
|
assert_eq!("".as_bytes(), b);
|
|
|
|
|
assert_eq!("abc".as_bytes(), b"abc");
|
|
|
|
|
assert_eq!("ศไทย中华Việt Nam".as_bytes(), v);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
|
#[should_panic]
|
|
|
|
|
fn test_as_bytes_fail() {
|
|
|
|
|
// Don't double free. (I'm not sure if this exercises the
|
|
|
|
|
// original problem code path anymore.)
|
2015-06-08 14:55:35 +00:00
|
|
|
|
let s = String::from("");
|
2015-03-11 04:58:16 +00:00
|
|
|
|
let _bytes = s.as_bytes();
|
|
|
|
|
panic!();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
|
fn test_as_ptr() {
|
|
|
|
|
let buf = "hello".as_ptr();
|
|
|
|
|
unsafe {
|
2022-08-19 09:33:06 +00:00
|
|
|
|
assert_eq!(*buf.add(0), b'h');
|
|
|
|
|
assert_eq!(*buf.add(1), b'e');
|
|
|
|
|
assert_eq!(*buf.add(2), b'l');
|
|
|
|
|
assert_eq!(*buf.add(3), b'l');
|
|
|
|
|
assert_eq!(*buf.add(4), b'o');
|
2015-03-11 04:58:16 +00:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
|
fn vec_str_conversions() {
|
2015-06-08 14:55:35 +00:00
|
|
|
|
let s1: String = String::from("All mimsy were the borogoves");
|
2015-03-11 04:58:16 +00:00
|
|
|
|
|
|
|
|
|
let v: Vec<u8> = s1.as_bytes().to_vec();
|
2015-06-08 14:55:35 +00:00
|
|
|
|
let s2: String = String::from(from_utf8(&v).unwrap());
|
2015-03-11 04:58:16 +00:00
|
|
|
|
let mut i = 0;
|
|
|
|
|
let n1 = s1.len();
|
|
|
|
|
let n2 = v.len();
|
|
|
|
|
assert_eq!(n1, n2);
|
|
|
|
|
while i < n1 {
|
|
|
|
|
let a: u8 = s1.as_bytes()[i];
|
|
|
|
|
let b: u8 = s2.as_bytes()[i];
|
|
|
|
|
assert_eq!(a, b);
|
|
|
|
|
i += 1;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
|
fn test_contains() {
|
|
|
|
|
assert!("abcde".contains("bcd"));
|
|
|
|
|
assert!("abcde".contains("abcd"));
|
|
|
|
|
assert!("abcde".contains("bcde"));
|
|
|
|
|
assert!("abcde".contains(""));
|
|
|
|
|
assert!("".contains(""));
|
|
|
|
|
assert!(!"abcde".contains("def"));
|
|
|
|
|
assert!(!"".contains("a"));
|
|
|
|
|
|
|
|
|
|
let data = "ประเทศไทย中华Việt Nam";
|
|
|
|
|
assert!(data.contains("ประเ"));
|
|
|
|
|
assert!(data.contains("ะเ"));
|
|
|
|
|
assert!(data.contains("中华"));
|
|
|
|
|
assert!(!data.contains("ไท华"));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
|
fn test_contains_char() {
|
2015-03-30 18:00:05 +00:00
|
|
|
|
assert!("abc".contains('b'));
|
|
|
|
|
assert!("a".contains('a'));
|
|
|
|
|
assert!(!"abc".contains('d'));
|
|
|
|
|
assert!(!"".contains('a'));
|
2015-03-11 04:58:16 +00:00
|
|
|
|
}
|
|
|
|
|
|
2015-06-09 09:23:22 +00:00
|
|
|
|
#[test]
|
|
|
|
|
fn test_split_at() {
|
|
|
|
|
let s = "ศไทย中华Việt Nam";
|
|
|
|
|
for (index, _) in s.char_indices() {
|
|
|
|
|
let (a, b) = s.split_at(index);
|
|
|
|
|
assert_eq!(&s[..a.len()], a);
|
|
|
|
|
assert_eq!(&s[a.len()..], b);
|
|
|
|
|
}
|
|
|
|
|
let (a, b) = s.split_at(s.len());
|
|
|
|
|
assert_eq!(a, s);
|
|
|
|
|
assert_eq!(b, "");
|
|
|
|
|
}
|
|
|
|
|
|
2015-06-15 17:24:52 +00:00
|
|
|
|
#[test]
|
|
|
|
|
fn test_split_at_mut() {
|
|
|
|
|
let mut s = "Hello World".to_string();
|
|
|
|
|
{
|
|
|
|
|
let (a, b) = s.split_at_mut(5);
|
|
|
|
|
a.make_ascii_uppercase();
|
|
|
|
|
b.make_ascii_lowercase();
|
|
|
|
|
}
|
|
|
|
|
assert_eq!(s, "HELLO world");
|
|
|
|
|
}
|
|
|
|
|
|
2015-06-09 09:23:22 +00:00
|
|
|
|
#[test]
|
|
|
|
|
#[should_panic]
|
|
|
|
|
fn test_split_at_boundscheck() {
|
|
|
|
|
let s = "ศไทย中华Việt Nam";
|
2021-10-14 22:54:55 +00:00
|
|
|
|
let _ = s.split_at(1);
|
2015-06-09 09:23:22 +00:00
|
|
|
|
}
|
|
|
|
|
|
2015-03-11 04:58:16 +00:00
|
|
|
|
#[test]
|
|
|
|
|
fn test_escape_unicode() {
|
2019-02-01 12:31:24 +00:00
|
|
|
|
assert_eq!("abc".escape_unicode().to_string(), "\\u{61}\\u{62}\\u{63}");
|
|
|
|
|
assert_eq!("a c".escape_unicode().to_string(), "\\u{61}\\u{20}\\u{63}");
|
|
|
|
|
assert_eq!("\r\n\t".escape_unicode().to_string(), "\\u{d}\\u{a}\\u{9}");
|
|
|
|
|
assert_eq!("'\"\\".escape_unicode().to_string(), "\\u{27}\\u{22}\\u{5c}");
|
|
|
|
|
assert_eq!("\x00\x01\u{fe}\u{ff}".escape_unicode().to_string(), "\\u{0}\\u{1}\\u{fe}\\u{ff}");
|
|
|
|
|
assert_eq!("\u{100}\u{ffff}".escape_unicode().to_string(), "\\u{100}\\u{ffff}");
|
|
|
|
|
assert_eq!("\u{10000}\u{10ffff}".escape_unicode().to_string(), "\\u{10000}\\u{10ffff}");
|
|
|
|
|
assert_eq!("ab\u{fb00}".escape_unicode().to_string(), "\\u{61}\\u{62}\\u{fb00}");
|
|
|
|
|
assert_eq!("\u{1d4ea}\r".escape_unicode().to_string(), "\\u{1d4ea}\\u{d}");
|
2015-03-11 04:58:16 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
2016-07-27 10:10:31 +00:00
|
|
|
|
fn test_escape_debug() {
|
2018-05-21 17:57:49 +00:00
|
|
|
|
// Note that there are subtleties with the number of backslashes
|
|
|
|
|
// on the left- and right-hand sides. In particular, Unicode code points
|
|
|
|
|
// are usually escaped with two backslashes on the right-hand side, as
|
2018-11-27 02:59:49 +00:00
|
|
|
|
// they are escaped. However, when the character is unescaped (e.g., for
|
2018-05-21 17:57:49 +00:00
|
|
|
|
// printable characters), only a single backslash appears (as the character
|
|
|
|
|
// itself appears in the debug string).
|
2019-02-01 12:31:24 +00:00
|
|
|
|
assert_eq!("abc".escape_debug().to_string(), "abc");
|
|
|
|
|
assert_eq!("a c".escape_debug().to_string(), "a c");
|
|
|
|
|
assert_eq!("éèê".escape_debug().to_string(), "éèê");
|
2022-03-26 20:35:58 +00:00
|
|
|
|
assert_eq!("\0\r\n\t".escape_debug().to_string(), "\\0\\r\\n\\t");
|
2019-02-01 12:31:24 +00:00
|
|
|
|
assert_eq!("'\"\\".escape_debug().to_string(), "\\'\\\"\\\\");
|
|
|
|
|
assert_eq!("\u{7f}\u{ff}".escape_debug().to_string(), "\\u{7f}\u{ff}");
|
|
|
|
|
assert_eq!("\u{100}\u{ffff}".escape_debug().to_string(), "\u{100}\\u{ffff}");
|
|
|
|
|
assert_eq!("\u{10000}\u{10ffff}".escape_debug().to_string(), "\u{10000}\\u{10ffff}");
|
|
|
|
|
assert_eq!("ab\u{200b}".escape_debug().to_string(), "ab\\u{200b}");
|
|
|
|
|
assert_eq!("\u{10d4ea}\r".escape_debug().to_string(), "\\u{10d4ea}\\r");
|
|
|
|
|
assert_eq!(
|
|
|
|
|
"\u{301}a\u{301}bé\u{e000}".escape_debug().to_string(),
|
|
|
|
|
"\\u{301}a\u{301}bé\\u{e000}"
|
|
|
|
|
);
|
2016-07-25 23:39:54 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
|
fn test_escape_default() {
|
2019-02-01 12:31:24 +00:00
|
|
|
|
assert_eq!("abc".escape_default().to_string(), "abc");
|
|
|
|
|
assert_eq!("a c".escape_default().to_string(), "a c");
|
|
|
|
|
assert_eq!("éèê".escape_default().to_string(), "\\u{e9}\\u{e8}\\u{ea}");
|
|
|
|
|
assert_eq!("\r\n\t".escape_default().to_string(), "\\r\\n\\t");
|
|
|
|
|
assert_eq!("'\"\\".escape_default().to_string(), "\\'\\\"\\\\");
|
|
|
|
|
assert_eq!("\u{7f}\u{ff}".escape_default().to_string(), "\\u{7f}\\u{ff}");
|
|
|
|
|
assert_eq!("\u{100}\u{ffff}".escape_default().to_string(), "\\u{100}\\u{ffff}");
|
|
|
|
|
assert_eq!("\u{10000}\u{10ffff}".escape_default().to_string(), "\\u{10000}\\u{10ffff}");
|
|
|
|
|
assert_eq!("ab\u{200b}".escape_default().to_string(), "ab\\u{200b}");
|
|
|
|
|
assert_eq!("\u{10d4ea}\r".escape_default().to_string(), "\\u{10d4ea}\\r");
|
2015-03-11 04:58:16 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
|
fn test_total_ord() {
|
2015-10-22 02:19:14 +00:00
|
|
|
|
assert_eq!("1234".cmp("123"), Greater);
|
|
|
|
|
assert_eq!("123".cmp("1234"), Less);
|
|
|
|
|
assert_eq!("1234".cmp("1234"), Equal);
|
|
|
|
|
assert_eq!("12345555".cmp("123456"), Less);
|
|
|
|
|
assert_eq!("22".cmp("1234"), Greater);
|
2015-03-11 04:58:16 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
|
fn test_iterator() {
|
|
|
|
|
let s = "ศไทย中华Việt Nam";
|
|
|
|
|
let v = ['ศ', 'ไ', 'ท', 'ย', '中', '华', 'V', 'i', 'ệ', 't', ' ', 'N', 'a', 'm'];
|
|
|
|
|
|
|
|
|
|
let mut pos = 0;
|
|
|
|
|
let it = s.chars();
|
|
|
|
|
|
|
|
|
|
for c in it {
|
|
|
|
|
assert_eq!(c, v[pos]);
|
|
|
|
|
pos += 1;
|
|
|
|
|
}
|
|
|
|
|
assert_eq!(pos, v.len());
|
2016-11-19 22:18:43 +00:00
|
|
|
|
assert_eq!(s.chars().count(), v.len());
|
2015-03-11 04:58:16 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
|
fn test_rev_iterator() {
|
|
|
|
|
let s = "ศไทย中华Việt Nam";
|
|
|
|
|
let v = ['m', 'a', 'N', ' ', 't', 'ệ', 'i', 'V', '华', '中', 'ย', 'ท', 'ไ', 'ศ'];
|
|
|
|
|
|
|
|
|
|
let mut pos = 0;
|
|
|
|
|
let it = s.chars().rev();
|
|
|
|
|
|
|
|
|
|
for c in it {
|
|
|
|
|
assert_eq!(c, v[pos]);
|
|
|
|
|
pos += 1;
|
|
|
|
|
}
|
|
|
|
|
assert_eq!(pos, v.len());
|
|
|
|
|
}
|
|
|
|
|
|
2021-09-11 15:40:04 +00:00
|
|
|
|
#[test]
|
|
|
|
|
fn test_to_lowercase_rev_iterator() {
|
|
|
|
|
let s = "AÖßÜ💩ΣΤΙΓΜΑΣǅﬁİ";
|
|
|
|
|
let v = ['\u{307}', 'i', 'ﬁ', 'ǆ', 'σ', 'α', 'μ', 'γ', 'ι', 'τ', 'σ', '💩', 'ü', 'ß', 'ö', 'a'];
|
|
|
|
|
|
|
|
|
|
let mut pos = 0;
|
|
|
|
|
let it = s.chars().flat_map(|c| c.to_lowercase()).rev();
|
|
|
|
|
|
|
|
|
|
for c in it {
|
|
|
|
|
assert_eq!(c, v[pos]);
|
|
|
|
|
pos += 1;
|
|
|
|
|
}
|
|
|
|
|
assert_eq!(pos, v.len());
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
|
fn test_to_uppercase_rev_iterator() {
|
|
|
|
|
let s = "aößü💩στιγμαςǅﬁᾀ";
|
|
|
|
|
let v =
|
|
|
|
|
['Ι', 'Ἀ', 'I', 'F', 'Ǆ', 'Σ', 'Α', 'Μ', 'Γ', 'Ι', 'Τ', 'Σ', '💩', 'Ü', 'S', 'S', 'Ö', 'A'];
|
|
|
|
|
|
|
|
|
|
let mut pos = 0;
|
|
|
|
|
let it = s.chars().flat_map(|c| c.to_uppercase()).rev();
|
|
|
|
|
|
|
|
|
|
for c in it {
|
|
|
|
|
assert_eq!(c, v[pos]);
|
|
|
|
|
pos += 1;
|
|
|
|
|
}
|
|
|
|
|
assert_eq!(pos, v.len());
|
|
|
|
|
}
|
|
|
|
|
|
2015-03-11 04:58:16 +00:00
|
|
|
|
#[test]
|
2019-12-07 11:42:19 +00:00
|
|
|
|
#[cfg_attr(miri, ignore)] // Miri is too slow
|
2015-03-11 04:58:16 +00:00
|
|
|
|
fn test_chars_decoding() {
|
2016-09-08 11:54:39 +00:00
|
|
|
|
let mut bytes = [0; 4];
|
2019-02-02 09:34:36 +00:00
|
|
|
|
for c in (0..0x110000).filter_map(std::char::from_u32) {
|
2016-09-08 11:54:39 +00:00
|
|
|
|
let s = c.encode_utf8(&mut bytes);
|
2015-03-11 04:58:16 +00:00
|
|
|
|
if Some(c) != s.chars().next() {
|
|
|
|
|
panic!("character {:x}={} does not decode correctly", c as u32, c);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
2019-12-07 11:42:19 +00:00
|
|
|
|
#[cfg_attr(miri, ignore)] // Miri is too slow
|
2015-03-11 04:58:16 +00:00
|
|
|
|
fn test_chars_rev_decoding() {
|
2016-09-08 11:54:39 +00:00
|
|
|
|
let mut bytes = [0; 4];
|
2019-02-02 09:34:36 +00:00
|
|
|
|
for c in (0..0x110000).filter_map(std::char::from_u32) {
|
2016-09-08 11:54:39 +00:00
|
|
|
|
let s = c.encode_utf8(&mut bytes);
|
2015-03-11 04:58:16 +00:00
|
|
|
|
if Some(c) != s.chars().rev().next() {
|
|
|
|
|
panic!("character {:x}={} does not decode correctly", c as u32, c);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
|
fn test_iterator_clone() {
|
|
|
|
|
let s = "ศไทย中华Việt Nam";
|
|
|
|
|
let mut it = s.chars();
|
|
|
|
|
it.next();
|
|
|
|
|
assert!(it.clone().zip(it).all(|(x, y)| x == y));
|
|
|
|
|
}
|
|
|
|
|
|
2016-11-19 18:43:41 +00:00
|
|
|
|
#[test]
|
|
|
|
|
fn test_iterator_last() {
|
|
|
|
|
let s = "ศไทย中华Việt Nam";
|
|
|
|
|
let mut it = s.chars();
|
|
|
|
|
it.next();
|
|
|
|
|
assert_eq!(it.last(), Some('m'));
|
|
|
|
|
}
|
|
|
|
|
|
2019-07-26 06:58:37 +00:00
|
|
|
|
#[test]
|
2019-07-29 16:26:59 +00:00
|
|
|
|
fn test_chars_debug() {
|
2019-07-26 06:58:37 +00:00
|
|
|
|
let s = "ศไทย中华Việt Nam";
|
|
|
|
|
let c = s.chars();
|
|
|
|
|
assert_eq!(
|
2022-02-12 19:16:17 +00:00
|
|
|
|
format!("{c:?}"),
|
2019-07-26 06:58:37 +00:00
|
|
|
|
r#"Chars(['ศ', 'ไ', 'ท', 'ย', '中', '华', 'V', 'i', 'ệ', 't', ' ', 'N', 'a', 'm'])"#
|
|
|
|
|
);
|
|
|
|
|
}
|
|
|
|
|
|
2015-03-11 04:58:16 +00:00
|
|
|
|
#[test]
|
|
|
|
|
fn test_bytesator() {
|
|
|
|
|
let s = "ศไทย中华Việt Nam";
|
|
|
|
|
let v = [
|
|
|
|
|
224, 184, 168, 224, 185, 132, 224, 184, 151, 224, 184, 162, 228, 184, 173, 229, 141, 142,
|
|
|
|
|
86, 105, 225, 187, 135, 116, 32, 78, 97, 109,
|
|
|
|
|
];
|
|
|
|
|
let mut pos = 0;
|
|
|
|
|
|
|
|
|
|
for b in s.bytes() {
|
|
|
|
|
assert_eq!(b, v[pos]);
|
|
|
|
|
pos += 1;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
|
fn test_bytes_revator() {
|
|
|
|
|
let s = "ศไทย中华Việt Nam";
|
|
|
|
|
let v = [
|
|
|
|
|
224, 184, 168, 224, 185, 132, 224, 184, 151, 224, 184, 162, 228, 184, 173, 229, 141, 142,
|
|
|
|
|
86, 105, 225, 187, 135, 116, 32, 78, 97, 109,
|
|
|
|
|
];
|
|
|
|
|
let mut pos = v.len();
|
|
|
|
|
|
|
|
|
|
for b in s.bytes().rev() {
|
|
|
|
|
pos -= 1;
|
|
|
|
|
assert_eq!(b, v[pos]);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2015-08-30 15:32:43 +00:00
|
|
|
|
#[test]
|
|
|
|
|
fn test_bytesator_nth() {
|
|
|
|
|
let s = "ศไทย中华Việt Nam";
|
|
|
|
|
let v = [
|
|
|
|
|
224, 184, 168, 224, 185, 132, 224, 184, 151, 224, 184, 162, 228, 184, 173, 229, 141, 142,
|
|
|
|
|
86, 105, 225, 187, 135, 116, 32, 78, 97, 109,
|
|
|
|
|
];
|
|
|
|
|
|
|
|
|
|
let mut b = s.bytes();
|
|
|
|
|
assert_eq!(b.nth(2).unwrap(), v[2]);
|
|
|
|
|
assert_eq!(b.nth(10).unwrap(), v[10]);
|
|
|
|
|
assert_eq!(b.nth(200), None);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
|
fn test_bytesator_count() {
|
|
|
|
|
let s = "ศไทย中华Việt Nam";
|
|
|
|
|
|
|
|
|
|
let b = s.bytes();
|
|
|
|
|
assert_eq!(b.count(), 28)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
|
fn test_bytesator_last() {
|
|
|
|
|
let s = "ศไทย中华Việt Nam";
|
|
|
|
|
|
|
|
|
|
let b = s.bytes();
|
|
|
|
|
assert_eq!(b.last().unwrap(), 109)
|
|
|
|
|
}
|
|
|
|
|
|
2015-03-11 04:58:16 +00:00
|
|
|
|
#[test]
|
|
|
|
|
fn test_char_indicesator() {
|
|
|
|
|
let s = "ศไทย中华Việt Nam";
|
|
|
|
|
let p = [0, 3, 6, 9, 12, 15, 18, 19, 20, 23, 24, 25, 26, 27];
|
|
|
|
|
let v = ['ศ', 'ไ', 'ท', 'ย', '中', '华', 'V', 'i', 'ệ', 't', ' ', 'N', 'a', 'm'];
|
|
|
|
|
|
|
|
|
|
let mut pos = 0;
|
|
|
|
|
let it = s.char_indices();
|
|
|
|
|
|
|
|
|
|
for c in it {
|
|
|
|
|
assert_eq!(c, (p[pos], v[pos]));
|
|
|
|
|
pos += 1;
|
|
|
|
|
}
|
|
|
|
|
assert_eq!(pos, v.len());
|
|
|
|
|
assert_eq!(pos, p.len());
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
|
fn test_char_indices_revator() {
|
|
|
|
|
let s = "ศไทย中华Việt Nam";
|
|
|
|
|
let p = [27, 26, 25, 24, 23, 20, 19, 18, 15, 12, 9, 6, 3, 0];
|
|
|
|
|
let v = ['m', 'a', 'N', ' ', 't', 'ệ', 'i', 'V', '华', '中', 'ย', 'ท', 'ไ', 'ศ'];
|
|
|
|
|
|
|
|
|
|
let mut pos = 0;
|
|
|
|
|
let it = s.char_indices().rev();
|
|
|
|
|
|
|
|
|
|
for c in it {
|
|
|
|
|
assert_eq!(c, (p[pos], v[pos]));
|
|
|
|
|
pos += 1;
|
|
|
|
|
}
|
|
|
|
|
assert_eq!(pos, v.len());
|
|
|
|
|
assert_eq!(pos, p.len());
|
|
|
|
|
}
|
|
|
|
|
|
2016-11-20 00:37:48 +00:00
|
|
|
|
#[test]
|
|
|
|
|
fn test_char_indices_last() {
|
|
|
|
|
let s = "ศไทย中华Việt Nam";
|
|
|
|
|
let mut it = s.char_indices();
|
|
|
|
|
it.next();
|
|
|
|
|
assert_eq!(it.last(), Some((27, 'm')));
|
|
|
|
|
}

#[test]
fn test_splitn_char_iterator() {
    let data = "\nMäry häd ä little lämb\nLittle lämb\n";

    let split: Vec<&str> = data.splitn(4, ' ').collect();
    assert_eq!(split, ["\nMäry", "häd", "ä", "little lämb\nLittle lämb\n"]);

    let split: Vec<&str> = data.splitn(4, |c: char| c == ' ').collect();
    assert_eq!(split, ["\nMäry", "häd", "ä", "little lämb\nLittle lämb\n"]);

    // Unicode
    let split: Vec<&str> = data.splitn(4, 'ä').collect();
    assert_eq!(split, ["\nM", "ry h", "d ", " little lämb\nLittle lämb\n"]);

    let split: Vec<&str> = data.splitn(4, |c: char| c == 'ä').collect();
    assert_eq!(split, ["\nM", "ry h", "d ", " little lämb\nLittle lämb\n"]);
}

#[test]
fn test_split_char_iterator_no_trailing() {
    let data = "\nMäry häd ä little lämb\nLittle lämb\n";

    let split: Vec<&str> = data.split('\n').collect();
    assert_eq!(split, ["", "Märy häd ä little lämb", "Little lämb", ""]);

    let split: Vec<&str> = data.split_terminator('\n').collect();
    assert_eq!(split, ["", "Märy häd ä little lämb", "Little lämb"]);
}
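
// A minimal added sketch (not from the original file): `split_terminator` only
// suppresses the *trailing* empty piece produced by a terminating separator; a
// leading separator still yields a leading "".
#[test]
fn test_split_terminator_leading_empty() {
    let split: Vec<&str> = "\nabc\n".split_terminator('\n').collect();
    assert_eq!(split, ["", "abc"]);
}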

#[test]
fn test_split_char_iterator_inclusive() {
    let data = "\nMäry häd ä little lämb\nLittle lämb\n";

    let split: Vec<&str> = data.split_inclusive('\n').collect();
    assert_eq!(split, ["\n", "Märy häd ä little lämb\n", "Little lämb\n"]);

    let uppercase_separated = "SheePSharKTurtlECaT";
    let mut first_char = true;
    let split: Vec<&str> = uppercase_separated
        .split_inclusive(|c: char| {
            let split = !first_char && c.is_uppercase();
            first_char = split;
            split
        })
        .collect();
    assert_eq!(split, ["SheeP", "SharK", "TurtlE", "CaT"]);
}

#[test]
fn test_split_char_iterator_inclusive_rev() {
    let data = "\nMäry häd ä little lämb\nLittle lämb\n";

    let split: Vec<&str> = data.split_inclusive('\n').rev().collect();
    assert_eq!(split, ["Little lämb\n", "Märy häd ä little lämb\n", "\n"]);

    // Note that the predicate is stateful and thus dependent on the iteration order.
    // (A different predicate is needed for the reverse iterator than for the forward one.)
    // Not sure if anything can be done though.
    let uppercase_separated = "SheePSharKTurtlECaT";
    let mut term_char = true;
    let split: Vec<&str> = uppercase_separated
        .split_inclusive(|c: char| {
            let split = term_char && c.is_uppercase();
            term_char = c.is_uppercase();
            split
        })
        .rev()
        .collect();
    assert_eq!(split, ["CaT", "TurtlE", "SharK", "SheeP"]);
}
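
// Added for illustration (not upstream): `split_inclusive` keeps the matched
// separator at the end of each piece and never emits a trailing empty slice.
#[test]
fn test_split_inclusive_no_trailing_empty() {
    let split: Vec<&str> = "a\nb\n".split_inclusive('\n').collect();
    assert_eq!(split, ["a\n", "b\n"]);
    let split: Vec<&str> = "a\nb".split_inclusive('\n').collect();
    assert_eq!(split, ["a\n", "b"]);
}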

#[test]
fn test_rsplit() {
    let data = "\nMäry häd ä little lämb\nLittle lämb\n";

    let split: Vec<&str> = data.rsplit(' ').collect();
    assert_eq!(split, ["lämb\n", "lämb\nLittle", "little", "ä", "häd", "\nMäry"]);

    let split: Vec<&str> = data.rsplit("lämb").collect();
    assert_eq!(split, ["\n", "\nLittle ", "\nMäry häd ä little "]);

    let split: Vec<&str> = data.rsplit(|c: char| c == 'ä').collect();
    assert_eq!(split, ["mb\n", "mb\nLittle l", " little l", "d ", "ry h", "\nM"]);
}

#[test]
fn test_rsplitn() {
    let data = "\nMäry häd ä little lämb\nLittle lämb\n";

    let split: Vec<&str> = data.rsplitn(2, ' ').collect();
    assert_eq!(split, ["lämb\n", "\nMäry häd ä little lämb\nLittle"]);

    let split: Vec<&str> = data.rsplitn(2, "lämb").collect();
    assert_eq!(split, ["\n", "\nMäry häd ä little lämb\nLittle "]);

    let split: Vec<&str> = data.rsplitn(2, |c: char| c == 'ä').collect();
    assert_eq!(split, ["mb\n", "\nMäry häd ä little lämb\nLittle l"]);
}

#[test]
fn test_split_once() {
    assert_eq!("".split_once("->"), None);
    assert_eq!("-".split_once("->"), None);
    assert_eq!("->".split_once("->"), Some(("", "")));
    assert_eq!("a->".split_once("->"), Some(("a", "")));
    assert_eq!("->b".split_once("->"), Some(("", "b")));
    assert_eq!("a->b".split_once("->"), Some(("a", "b")));
    assert_eq!("a->b->c".split_once("->"), Some(("a", "b->c")));
    assert_eq!("---".split_once("--"), Some(("", "-")));
}

#[test]
fn test_rsplit_once() {
    assert_eq!("".rsplit_once("->"), None);
    assert_eq!("-".rsplit_once("->"), None);
    assert_eq!("->".rsplit_once("->"), Some(("", "")));
    assert_eq!("a->".rsplit_once("->"), Some(("a", "")));
    assert_eq!("->b".rsplit_once("->"), Some(("", "b")));
    assert_eq!("a->b".rsplit_once("->"), Some(("a", "b")));
    assert_eq!("a->b->c".rsplit_once("->"), Some(("a->b", "c")));
    assert_eq!("---".rsplit_once("--"), Some(("-", "")));
}
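
// Added sketch (not from the original suite): `split_once` agrees with taking
// both pieces of `splitn(2, ..)` whenever the delimiter is present.
#[test]
fn test_split_once_matches_splitn() {
    let mut it = "a->b->c".splitn(2, "->");
    assert_eq!("a->b->c".split_once("->"), Some((it.next().unwrap(), it.next().unwrap())));
}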

#[test]
fn test_split_whitespace() {
    let data = "\n \tMäry häd\tä little lämb\nLittle lämb\n";
    let words: Vec<&str> = data.split_whitespace().collect();
    assert_eq!(words, ["Märy", "häd", "ä", "little", "lämb", "Little", "lämb"])
}

#[test]
fn test_lines() {
    fn t(data: &str, expected: &[&str]) {
        let lines: Vec<&str> = data.lines().collect();
        assert_eq!(lines, expected);
    }
    t("", &[]);
    t("\n", &[""]);
    t("\n2nd", &["", "2nd"]);
    t("\r\n", &[""]);
    t("bare\r", &["bare\r"]);
    t("bare\rcr", &["bare\rcr"]);
    t("Text\n\r", &["Text", "\r"]);
    t(
        "\nMäry häd ä little lämb\n\r\nLittle lämb\n",
        &["", "Märy häd ä little lämb", "", "Little lämb"],
    );
    t(
        "\r\nMäry häd ä little lämb\n\nLittle lämb",
        &["", "Märy häd ä little lämb", "", "Little lämb"],
    );
}

#[test]
fn test_splitator() {
    fn t(s: &str, sep: &str, u: &[&str]) {
        let v: Vec<&str> = s.split(sep).collect();
        assert_eq!(v, u);
    }
    t("--1233345--", "12345", &["--1233345--"]);
    t("abc::hello::there", "::", &["abc", "hello", "there"]);
    t("::hello::there", "::", &["", "hello", "there"]);
    t("hello::there::", "::", &["hello", "there", ""]);
    t("::hello::there::", "::", &["", "hello", "there", ""]);
    t("ประเทศไทย中华Việt Nam", "中华", &["ประเทศไทย", "Việt Nam"]);
    t("zzXXXzzYYYzz", "zz", &["", "XXX", "YYY", ""]);
    t("zzXXXzYYYz", "XXX", &["zz", "zYYYz"]);
    t(".XXX.YYY.", ".", &["", "XXX", "YYY", ""]);
    t("", ".", &[""]);
    t("zz", "zz", &["", ""]);
    t("ok", "z", &["ok"]);
    t("zzz", "zz", &["", "z"]);
    t("zzzzz", "zz", &["", "", "z"]);
}

#[test]
fn test_str_default() {
    use std::default::Default;

    fn t<S: Default + AsRef<str>>() {
        let s: S = Default::default();
        assert_eq!(s.as_ref(), "");
    }

    t::<&str>();
    t::<String>();
    t::<&mut str>();
}

#[test]
fn test_str_container() {
    fn sum_len(v: &[&str]) -> usize {
        v.iter().map(|x| x.len()).sum()
    }

    let s = "01234";
    assert_eq!(5, sum_len(&["012", "", "34"]));
    assert_eq!(5, sum_len(&["01", "2", "34", ""]));
    assert_eq!(5, sum_len(&[s]));
}

#[test]
fn test_str_from_utf8() {
    let xs = b"hello";
    assert_eq!(from_utf8(xs), Ok("hello"));

    let xs = "ศไทย中华Việt Nam".as_bytes();
    assert_eq!(from_utf8(xs), Ok("ศไทย中华Việt Nam"));

    let xs = b"hello\xFF";
    assert!(from_utf8(xs).is_err());
}
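
// Added sketch (not upstream): the `Utf8Error` returned by `from_utf8` reports
// how many leading bytes were valid UTF-8.
#[test]
fn test_str_from_utf8_error_valid_up_to() {
    let err = from_utf8(b"hello\xFF").unwrap_err();
    assert_eq!(err.valid_up_to(), 5);
}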

#[test]
fn test_pattern_deref_forward() {
    let data = "aabcdaa";
    assert!(data.contains("bcd"));
    assert!(data.contains(&"bcd"));
    assert!(data.contains(&"bcd".to_string()));
}

#[test]
fn test_empty_match_indices() {
    let data = "aä中!";
    let vec: Vec<_> = data.match_indices("").collect();
    assert_eq!(vec, [(0, ""), (1, ""), (3, ""), (6, ""), (7, "")]);
}

#[test]
fn test_bool_from_str() {
    assert_eq!("true".parse().ok(), Some(true));
    assert_eq!("false".parse().ok(), Some(false));
    assert_eq!("not even a boolean".parse::<bool>().ok(), None);
}

fn check_contains_all_substrings(haystack: &str) {
    let mut modified_needle = String::new();

    for i in 0..haystack.len() {
        // check different haystack lengths since we special-case short haystacks.
        let haystack = &haystack[0..i];
        assert!(haystack.contains(""));
        for j in 0..haystack.len() {
            for k in j + 1..=haystack.len() {
                let needle = &haystack[j..k];
                assert!(haystack.contains(needle));
                modified_needle.clear();
                modified_needle.push_str(needle);
                modified_needle.replace_range(0..1, "\0");
                assert!(!haystack.contains(&modified_needle));

                modified_needle.clear();
                modified_needle.push_str(needle);
                modified_needle.replace_range(needle.len() - 1..needle.len(), "\0");
                assert!(!haystack.contains(&modified_needle));
            }
        }
    }
}

#[test]
#[cfg_attr(miri, ignore)] // Miri is too slow
fn strslice_issue_16589() {
    assert!("bananas".contains("nana"));

    // prior to the fix for #16589, x.contains("abcdabcd") returned false
    // test all substrings for good measure
    check_contains_all_substrings("012345678901234567890123456789bcdabcdabcd");
}

#[test]
fn strslice_issue_16878() {
    assert!(!"1234567ah012345678901ah".contains("hah"));
    assert!(!"00abc01234567890123456789abc".contains("bcabc"));
}

#[test]
fn strslice_issue_104726() {
    // Edge case in the simd_contains impl.
    // The first and last byte are the same, so the search backtracks by one byte,
    // which aligns with the end of the string. Previously, incorrect offset
    // calculations led to out-of-bounds slicing.
    #[rustfmt::skip]
    let needle = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaba";
    let haystack = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab";
    assert!(!haystack.contains(needle));
}

#[test]
#[cfg_attr(miri, ignore)] // Miri is too slow
fn test_strslice_contains() {
    let x = "There are moments, Jeeves, when one asks oneself, 'Do trousers matter?'";
    check_contains_all_substrings(x);
}

#[test]
fn test_rsplitn_char_iterator() {
    let data = "\nMäry häd ä little lämb\nLittle lämb\n";

    let mut split: Vec<&str> = data.rsplitn(4, ' ').collect();
    split.reverse();
    assert_eq!(split, ["\nMäry häd ä", "little", "lämb\nLittle", "lämb\n"]);

    let mut split: Vec<&str> = data.rsplitn(4, |c: char| c == ' ').collect();
    split.reverse();
    assert_eq!(split, ["\nMäry häd ä", "little", "lämb\nLittle", "lämb\n"]);

    // Unicode
    let mut split: Vec<&str> = data.rsplitn(4, 'ä').collect();
    split.reverse();
    assert_eq!(split, ["\nMäry häd ", " little l", "mb\nLittle l", "mb\n"]);

    let mut split: Vec<&str> = data.rsplitn(4, |c: char| c == 'ä').collect();
    split.reverse();
    assert_eq!(split, ["\nMäry häd ", " little l", "mb\nLittle l", "mb\n"]);
}

#[test]
fn test_split_char_iterator() {
    let data = "\nMäry häd ä little lämb\nLittle lämb\n";

    let split: Vec<&str> = data.split(' ').collect();
    assert_eq!(split, ["\nMäry", "häd", "ä", "little", "lämb\nLittle", "lämb\n"]);

    let mut rsplit: Vec<&str> = data.split(' ').rev().collect();
    rsplit.reverse();
    assert_eq!(rsplit, ["\nMäry", "häd", "ä", "little", "lämb\nLittle", "lämb\n"]);

    let split: Vec<&str> = data.split(|c: char| c == ' ').collect();
    assert_eq!(split, ["\nMäry", "häd", "ä", "little", "lämb\nLittle", "lämb\n"]);

    let mut rsplit: Vec<&str> = data.split(|c: char| c == ' ').rev().collect();
    rsplit.reverse();
    assert_eq!(rsplit, ["\nMäry", "häd", "ä", "little", "lämb\nLittle", "lämb\n"]);

    // Unicode
    let split: Vec<&str> = data.split('ä').collect();
    assert_eq!(split, ["\nM", "ry h", "d ", " little l", "mb\nLittle l", "mb\n"]);

    let mut rsplit: Vec<&str> = data.split('ä').rev().collect();
    rsplit.reverse();
    assert_eq!(rsplit, ["\nM", "ry h", "d ", " little l", "mb\nLittle l", "mb\n"]);

    let split: Vec<&str> = data.split(|c: char| c == 'ä').collect();
    assert_eq!(split, ["\nM", "ry h", "d ", " little l", "mb\nLittle l", "mb\n"]);

    let mut rsplit: Vec<&str> = data.split(|c: char| c == 'ä').rev().collect();
    rsplit.reverse();
    assert_eq!(rsplit, ["\nM", "ry h", "d ", " little l", "mb\nLittle l", "mb\n"]);
}

#[test]
fn test_rev_split_char_iterator_no_trailing() {
    let data = "\nMäry häd ä little lämb\nLittle lämb\n";

    let mut split: Vec<&str> = data.split('\n').rev().collect();
    split.reverse();
    assert_eq!(split, ["", "Märy häd ä little lämb", "Little lämb", ""]);

    let mut split: Vec<&str> = data.split_terminator('\n').rev().collect();
    split.reverse();
    assert_eq!(split, ["", "Märy häd ä little lämb", "Little lämb"]);
}

#[test]
fn test_utf16_code_units() {
    assert_eq!("é\u{1F4A9}".encode_utf16().collect::<Vec<u16>>(), [0xE9, 0xD83D, 0xDCA9])
}

#[test]
fn test_utf16_size_hint() {
    assert_eq!("".encode_utf16().size_hint(), (0, Some(0)));
    assert_eq!("123".encode_utf16().size_hint(), (1, Some(3)));
    assert_eq!("1234".encode_utf16().size_hint(), (2, Some(4)));
    assert_eq!("12345678".encode_utf16().size_hint(), (3, Some(8)));

    fn hint_vec(src: &str) -> Vec<(usize, Option<usize>)> {
        let mut it = src.encode_utf16();
        let mut result = Vec::new();
        result.push(it.size_hint());
        while it.next().is_some() {
            result.push(it.size_hint())
        }
        result
    }

    assert_eq!(hint_vec("12"), [(1, Some(2)), (1, Some(1)), (0, Some(0))]);
    assert_eq!(hint_vec("\u{101234}"), [(2, Some(4)), (1, Some(1)), (0, Some(0))]);
    assert_eq!(hint_vec("\u{101234}a"), [(2, Some(5)), (2, Some(2)), (1, Some(1)), (0, Some(0))]);
}
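
// Added sketch (not upstream): code points above U+FFFF encode as a surrogate
// pair, so they contribute two UTF-16 units while BMP characters contribute one.
#[test]
fn test_utf16_surrogate_pair_len() {
    assert_eq!("é".encode_utf16().count(), 1);
    assert_eq!("\u{1F4A9}".encode_utf16().count(), 2);
}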

#[test]
fn starts_with_in_unicode() {
    assert!(!"├── Cargo.toml".starts_with("# "));
}

#[test]
fn starts_short_long() {
    assert!(!"".starts_with("##"));
    assert!(!"##".starts_with("####"));
    assert!("####".starts_with("##"));
    assert!(!"##ä".starts_with("####"));
    assert!("####ä".starts_with("##"));
    assert!(!"##".starts_with("####ä"));
    assert!("##ä##".starts_with("##ä"));

    assert!("".starts_with(""));
    assert!("ä".starts_with(""));
    assert!("#ä".starts_with(""));
    assert!("##ä".starts_with(""));
    assert!("ä###".starts_with(""));
    assert!("#ä##".starts_with(""));
    assert!("##ä#".starts_with(""));
}

#[test]
fn contains_weird_cases() {
    assert!("* \t".contains(' '));
    assert!(!"* \t".contains('?'));
    assert!(!"* \t".contains('\u{1F4A9}'));
}

#[test]
fn trim_ws() {
    assert_eq!(" \t a \t ".trim_start_matches(|c: char| c.is_whitespace()), "a \t ");
    assert_eq!(" \t a \t ".trim_end_matches(|c: char| c.is_whitespace()), " \t a");
    assert_eq!(" \t a \t ".trim_start_matches(|c: char| c.is_whitespace()), "a \t ");
    assert_eq!(" \t a \t ".trim_end_matches(|c: char| c.is_whitespace()), " \t a");
    assert_eq!(" \t a \t ".trim_matches(|c: char| c.is_whitespace()), "a");
    assert_eq!(" \t \t ".trim_start_matches(|c: char| c.is_whitespace()), "");
    assert_eq!(" \t \t ".trim_end_matches(|c: char| c.is_whitespace()), "");
    assert_eq!(" \t \t ".trim_start_matches(|c: char| c.is_whitespace()), "");
    assert_eq!(" \t \t ".trim_end_matches(|c: char| c.is_whitespace()), "");
    assert_eq!(" \t \t ".trim_matches(|c: char| c.is_whitespace()), "");
}
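
// Minimal added sketch: `trim` is the whitespace special case of `trim_matches`.
#[test]
fn trim_matches_whitespace_equivalence() {
    let s = " \t a \t ";
    assert_eq!(s.trim(), s.trim_matches(|c: char| c.is_whitespace()));
}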

#[test]
fn to_lowercase() {
    assert_eq!("".to_lowercase(), "");
    assert_eq!("AÉDžaé ".to_lowercase(), "aédžaé ");

    // https://github.com/rust-lang/rust/issues/26035
    assert_eq!("ΑΣ".to_lowercase(), "ας");
    assert_eq!("Α'Σ".to_lowercase(), "α'ς");
    assert_eq!("Α''Σ".to_lowercase(), "α''ς");

    assert_eq!("ΑΣ Α".to_lowercase(), "ας α");
    assert_eq!("Α'Σ Α".to_lowercase(), "α'ς α");
    assert_eq!("Α''Σ Α".to_lowercase(), "α''ς α");

    assert_eq!("ΑΣ' Α".to_lowercase(), "ας' α");
    assert_eq!("ΑΣ'' Α".to_lowercase(), "ας'' α");

    assert_eq!("Α'Σ' Α".to_lowercase(), "α'ς' α");
    assert_eq!("Α''Σ'' Α".to_lowercase(), "α''ς'' α");

    assert_eq!("Α Σ".to_lowercase(), "α σ");
    assert_eq!("Α 'Σ".to_lowercase(), "α 'σ");
    assert_eq!("Α ''Σ".to_lowercase(), "α ''σ");

    assert_eq!("Σ".to_lowercase(), "σ");
    assert_eq!("'Σ".to_lowercase(), "'σ");
    assert_eq!("''Σ".to_lowercase(), "''σ");

    assert_eq!("ΑΣΑ".to_lowercase(), "ασα");
    assert_eq!("ΑΣ'Α".to_lowercase(), "ασ'α");
    assert_eq!("ΑΣ''Α".to_lowercase(), "ασ''α");

    // a really long string whose lowercase form is even longer.
    // this tests that implementations don't assume an incorrect
    // upper bound on allocations
    let upper = str::repeat("İ", 512);
    let lower = str::repeat("i̇", 512);
    assert_eq!(upper.to_lowercase(), lower);

    // a really long ascii-only string.
    // this tests that the ascii hot-path functions correctly
    let upper = str::repeat("A", 511);
    let lower = str::repeat("a", 511);
    assert_eq!(upper.to_lowercase(), lower);
}

#[test]
fn to_uppercase() {
    assert_eq!("".to_uppercase(), "");
    assert_eq!("aéDžßfiᾀ".to_uppercase(), "AÉDŽSSFIἈΙ");
}
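
// Added for illustration (not upstream): case conversion can change the number
// of chars, e.g. 'ß' uppercases to the two-character string "SS".
#[test]
fn to_uppercase_can_expand() {
    assert_eq!("ß".to_uppercase(), "SS");
    assert_eq!("ß".to_uppercase().chars().count(), 2);
}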

#[test]
fn test_into_string() {
    // The only way to acquire a Box<str> in the first place is through a String, so just
    // test that we can round-trip between Box<str> and String.
    let string = String::from("Some text goes here");
    assert_eq!(string.clone().into_boxed_str().into_string(), string);
}

#[test]
fn test_box_slice_clone() {
    let data = String::from("hello HELLO hello HELLO yes YES 5 中ä华!!!");
    let data2 = data.clone().into_boxed_str().clone().into_string();

    assert_eq!(data, data2);
}

#[test]
fn test_cow_from() {
    let borrowed = "borrowed";
    let owned = String::from("owned");
    match (Cow::from(owned.clone()), Cow::from(borrowed)) {
        (Cow::Owned(o), Cow::Borrowed(b)) => assert!(o == owned && b == borrowed),
        _ => panic!("invalid `Cow::from`"),
    }
}

#[test]
fn test_repeat() {
    assert_eq!("".repeat(3), "");
    assert_eq!("abc".repeat(0), "");
    assert_eq!("α".repeat(3), "ααα");
}
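
// Added sketch (not upstream): `repeat` multiplies the byte length, so the
// result length is `self.len() * n` regardless of character width.
#[test]
fn test_repeat_len() {
    assert_eq!("ab".repeat(3).len(), 6);
    assert_eq!("α".repeat(2).len(), 4); // 'α' is two bytes in UTF-8
}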

mod pattern {
    use std::str::pattern::SearchStep::{self, Done, Match, Reject};
    use std::str::pattern::{Pattern, ReverseSearcher, Searcher};

    macro_rules! make_test {
        ($name:ident, $p:expr, $h:expr, [$($e:expr,)*]) => {
            #[allow(unused_imports)]
            mod $name {
                use std::str::pattern::SearchStep::{Match, Reject};
                use super::{cmp_search_to_vec};
                #[test]
                fn fwd() {
                    cmp_search_to_vec(false, $p, $h, vec![$($e),*]);
                }
                #[test]
                fn bwd() {
                    cmp_search_to_vec(true, $p, $h, vec![$($e),*]);
                }
            }
        }
    }

    fn cmp_search_to_vec<'a>(
        rev: bool,
        pat: impl Pattern<'a, Searcher: ReverseSearcher<'a>>,
        haystack: &'a str,
        right: Vec<SearchStep>,
    ) {
        let mut searcher = pat.into_searcher(haystack);
        let mut v = vec![];
        loop {
            match if !rev { searcher.next() } else { searcher.next_back() } {
                Match(a, b) => v.push(Match(a, b)),
                Reject(a, b) => v.push(Reject(a, b)),
                Done => break,
            }
        }
        if rev {
            v.reverse();
        }

        let mut first_index = 0;
        let mut err = None;

        for (i, e) in right.iter().enumerate() {
            match *e {
                Match(a, b) | Reject(a, b) if a <= b && a == first_index => {
                    first_index = b;
                }
                _ => {
                    err = Some(i);
                    break;
                }
            }
        }

        if let Some(err) = err {
            panic!("Input skipped range at {err}");
        }

        if first_index != haystack.len() {
            panic!("Did not cover whole input");
        }

        assert_eq!(v, right);
    }

    make_test!(
        str_searcher_ascii_haystack,
        "bb",
        "abbcbbd",
        [Reject(0, 1), Match(1, 3), Reject(3, 4), Match(4, 6), Reject(6, 7),]
    );
    make_test!(
        str_searcher_ascii_haystack_seq,
        "bb",
        "abbcbbbbd",
        [Reject(0, 1), Match(1, 3), Reject(3, 4), Match(4, 6), Match(6, 8), Reject(8, 9),]
    );
    make_test!(
        str_searcher_empty_needle_ascii_haystack,
        "",
        "abbcbbd",
        [
            Match(0, 0),
            Reject(0, 1),
            Match(1, 1),
            Reject(1, 2),
            Match(2, 2),
            Reject(2, 3),
            Match(3, 3),
            Reject(3, 4),
            Match(4, 4),
            Reject(4, 5),
            Match(5, 5),
            Reject(5, 6),
            Match(6, 6),
            Reject(6, 7),
            Match(7, 7),
        ]
    );
    make_test!(
        str_searcher_multibyte_haystack,
        " ",
        "├──",
        [Reject(0, 3), Reject(3, 6), Reject(6, 9),]
    );
    make_test!(
        str_searcher_empty_needle_multibyte_haystack,
        "",
        "├──",
        [
            Match(0, 0),
            Reject(0, 3),
            Match(3, 3),
            Reject(3, 6),
            Match(6, 6),
            Reject(6, 9),
            Match(9, 9),
        ]
    );
    make_test!(str_searcher_empty_needle_empty_haystack, "", "", [Match(0, 0),]);
    make_test!(str_searcher_nonempty_needle_empty_haystack, "├", "", []);
    make_test!(
        char_searcher_ascii_haystack,
        'b',
        "abbcbbd",
        [
            Reject(0, 1),
            Match(1, 2),
            Match(2, 3),
            Reject(3, 4),
            Match(4, 5),
            Match(5, 6),
            Reject(6, 7),
        ]
    );
    make_test!(
        char_searcher_multibyte_haystack,
        ' ',
        "├──",
        [Reject(0, 3), Reject(3, 6), Reject(6, 9),]
    );
    make_test!(
        char_searcher_short_haystack,
        '\u{1F4A9}',
        "* \t",
        [Reject(0, 1), Reject(1, 2), Reject(2, 3),]
    );

    // See #85462
    #[test]
    fn str_searcher_empty_needle_after_done() {
        // Empty needle and haystack
        {
            let mut searcher = "".into_searcher("");

            assert_eq!(searcher.next(), SearchStep::Match(0, 0));
            assert_eq!(searcher.next(), SearchStep::Done);
            assert_eq!(searcher.next(), SearchStep::Done);
            assert_eq!(searcher.next(), SearchStep::Done);

            let mut searcher = "".into_searcher("");

            assert_eq!(searcher.next_back(), SearchStep::Match(0, 0));
            assert_eq!(searcher.next_back(), SearchStep::Done);
            assert_eq!(searcher.next_back(), SearchStep::Done);
            assert_eq!(searcher.next_back(), SearchStep::Done);
        }
        // Empty needle and non-empty haystack
        {
            let mut searcher = "".into_searcher("a");

            assert_eq!(searcher.next(), SearchStep::Match(0, 0));
            assert_eq!(searcher.next(), SearchStep::Reject(0, 1));
            assert_eq!(searcher.next(), SearchStep::Match(1, 1));
            assert_eq!(searcher.next(), SearchStep::Done);
            assert_eq!(searcher.next(), SearchStep::Done);
            assert_eq!(searcher.next(), SearchStep::Done);

            let mut searcher = "".into_searcher("a");

            assert_eq!(searcher.next_back(), SearchStep::Match(1, 1));
            assert_eq!(searcher.next_back(), SearchStep::Reject(0, 1));
            assert_eq!(searcher.next_back(), SearchStep::Match(0, 0));
            assert_eq!(searcher.next_back(), SearchStep::Done);
            assert_eq!(searcher.next_back(), SearchStep::Done);
            assert_eq!(searcher.next_back(), SearchStep::Done);
        }
    }
}

macro_rules! generate_iterator_test {
    {
        $name:ident {
            $(
                ($($arg:expr),*) -> [$($t:tt)*];
            )*
        }
        with $fwd:expr, $bwd:expr;
    } => {
        #[test]
        fn $name() {
            $(
                {
                    let res = vec![$($t)*];

                    let fwd_vec: Vec<_> = ($fwd)($($arg),*).collect();
                    assert_eq!(fwd_vec, res);

                    let mut bwd_vec: Vec<_> = ($bwd)($($arg),*).collect();
                    bwd_vec.reverse();
                    assert_eq!(bwd_vec, res);
                }
            )*
        }
    };
    {
        $name:ident {
            $(
                ($($arg:expr),*) -> [$($t:tt)*];
            )*
        }
        with $fwd:expr;
    } => {
        #[test]
        fn $name() {
            $(
                {
                    let res = vec![$($t)*];

                    let fwd_vec: Vec<_> = ($fwd)($($arg),*).collect();
                    assert_eq!(fwd_vec, res);
                }
            )*
        }
    }
}

generate_iterator_test! {
    double_ended_split {
        ("foo.bar.baz", '.') -> ["foo", "bar", "baz"];
        ("foo::bar::baz", "::") -> ["foo", "bar", "baz"];
    }
    with str::split, str::rsplit;
}

generate_iterator_test! {
    double_ended_split_terminator {
        ("foo;bar;baz;", ';') -> ["foo", "bar", "baz"];
    }
    with str::split_terminator, str::rsplit_terminator;
}

generate_iterator_test! {
    double_ended_matches {
        ("a1b2c3", char::is_numeric) -> ["1", "2", "3"];
    }
    with str::matches, str::rmatches;
}

generate_iterator_test! {
    double_ended_match_indices {
        ("a1b2c3", char::is_numeric) -> [(1, "1"), (3, "2"), (5, "3")];
    }
    with str::match_indices, str::rmatch_indices;
}

generate_iterator_test! {
    not_double_ended_splitn {
        ("foo::bar::baz", 2, "::") -> ["foo", "bar::baz"];
    }
    with str::splitn;
}

generate_iterator_test! {
    not_double_ended_rsplitn {
        ("foo::bar::baz", 2, "::") -> ["baz", "foo::bar"];
    }
    with str::rsplitn;
}

#[test]
fn different_str_pattern_forwarding_lifetimes() {
    use std::str::pattern::Pattern;

    fn foo<'a, P>(p: P)
    where
        for<'b> &'b P: Pattern<'a>,
    {
        for _ in 0..3 {
            "asdf".find(&p);
        }
    }

    foo::<&str>("x");
}

#[test]
fn test_str_multiline() {
    let a: String = "this \
is a test"
        .to_string();
    let b: String = "this \
              is \
              another \
              test"
        .to_string();
    assert_eq!(a, "this is a test".to_string());
    assert_eq!(b, "this is another test".to_string());
}

#[test]
fn test_str_escapes() {
    let x = "\\\\\
    ";
    assert_eq!(x, r"\\"); // extraneous whitespace stripped
}

#[test]
fn const_str_ptr() {
    const A: [u8; 2] = ['h' as u8, 'i' as u8];
    const B: &'static [u8; 2] = &A;
    const C: *const u8 = B as *const u8;

    // Miri does not deduplicate consts (https://github.com/rust-lang/miri/issues/131)
    #[cfg(not(miri))]
    {
        let foo = &A as *const u8;
        assert_eq!(foo, C);
    }

    unsafe {
        assert_eq!(from_utf8_unchecked(&A), "hi");
        assert_eq!(*C, A[0]);
        assert_eq!(*(&B[0] as *const u8), A[0]);
    }
}

#[test]
fn utf8() {
    let yen: char = '¥'; // 0xa5
    let c_cedilla: char = 'ç'; // 0xe7
    let thorn: char = 'þ'; // 0xfe
    let y_diaeresis: char = 'ÿ'; // 0xff
    let pi: char = 'Π'; // 0x3a0

    assert_eq!(yen as isize, 0xa5);
    assert_eq!(c_cedilla as isize, 0xe7);
    assert_eq!(thorn as isize, 0xfe);
    assert_eq!(y_diaeresis as isize, 0xff);
    assert_eq!(pi as isize, 0x3a0);

    assert_eq!(pi as isize, '\u{3a0}' as isize);
    assert_eq!('\x0a' as isize, '\n' as isize);

    let bhutan: String = "འབྲུག་ཡུལ།".to_string();
    let japan: String = "日本".to_string();
    let uzbekistan: String = "Ўзбекистон".to_string();
    let austria: String = "Österreich".to_string();

    let bhutan_e: String =
        "\u{f60}\u{f56}\u{fb2}\u{f74}\u{f42}\u{f0b}\u{f61}\u{f74}\u{f63}\u{f0d}".to_string();
    let japan_e: String = "\u{65e5}\u{672c}".to_string();
    let uzbekistan_e: String =
        "\u{40e}\u{437}\u{431}\u{435}\u{43a}\u{438}\u{441}\u{442}\u{43e}\u{43d}".to_string();
    let austria_e: String = "\u{d6}sterreich".to_string();

    let oo: char = 'Ö';
    assert_eq!(oo as isize, 0xd6);

    fn check_str_eq(a: String, b: String) {
        let mut i: isize = 0;
        for ab in a.bytes() {
            println!("{i}");
            println!("{ab}");
            let bb: u8 = b.as_bytes()[i as usize];
            println!("{bb}");
            assert_eq!(ab, bb);
            i += 1;
        }
    }

    check_str_eq(bhutan, bhutan_e);
    check_str_eq(japan, japan_e);
    check_str_eq(uzbekistan, uzbekistan_e);
    check_str_eq(austria, austria_e);
}

#[test]
fn utf8_chars() {
    // Chars of 1, 2, 3, and 4 bytes
    let chs: Vec<char> = vec!['e', 'é', '€', '\u{10000}'];
    let s: String = chs.iter().cloned().collect();
    let schs: Vec<char> = s.chars().collect();

    assert_eq!(s.len(), 10);
    assert_eq!(s.chars().count(), 4);
    assert_eq!(schs.len(), 4);
    assert_eq!(schs.iter().cloned().collect::<String>(), s);

    assert!(from_utf8(s.as_bytes()).is_ok());
    // invalid prefix
    assert!(!from_utf8(&[0x80]).is_ok());
    // invalid 2 byte prefix
    assert!(!from_utf8(&[0xc0]).is_ok());
    assert!(!from_utf8(&[0xc0, 0x10]).is_ok());
    // invalid 3 byte prefix
    assert!(!from_utf8(&[0xe0]).is_ok());
    assert!(!from_utf8(&[0xe0, 0x10]).is_ok());
    assert!(!from_utf8(&[0xe0, 0xff, 0x10]).is_ok());
    // invalid 4 byte prefix
    assert!(!from_utf8(&[0xf0]).is_ok());
    assert!(!from_utf8(&[0xf0, 0x10]).is_ok());
    assert!(!from_utf8(&[0xf0, 0xff, 0x10]).is_ok());
    assert!(!from_utf8(&[0xf0, 0xff, 0xff, 0x10]).is_ok());
}

#[test]
fn utf8_char_counts() {
    let strs = [("e", 1), ("é", 1), ("€", 1), ("\u{10000}", 1), ("eé€\u{10000}", 4)];
    let spread = if cfg!(miri) { 4 } else { 8 };
    let mut reps = [8, 64, 256, 512]
        .iter()
        .copied()
        .flat_map(|n| n - spread..=n + spread)
        .collect::<Vec<usize>>();
    if cfg!(not(miri)) {
        reps.extend([1024, 1 << 16].iter().copied().flat_map(|n| n - spread..=n + spread));
    }
    let counts = if cfg!(miri) { 0..1 } else { 0..8 };
    let padding = counts.map(|len| " ".repeat(len)).collect::<Vec<String>>();

    for repeat in reps {
        for (tmpl_str, tmpl_char_count) in strs {
            for pad_start in &padding {
                for pad_end in &padding {
                    // Create a string with padding...
                    let with_padding =
                        format!("{}{}{}", pad_start, tmpl_str.repeat(repeat), pad_end);
                    // ...and then skip past that padding. This should ensure
                    // that we test several different alignments for both head
                    // and tail.
                    let si = pad_start.len();
                    let ei = with_padding.len() - pad_end.len();
                    let target = &with_padding[si..ei];

                    assert!(!target.starts_with(" ") && !target.ends_with(" "));
                    let expected_count = tmpl_char_count * repeat;
                    assert_eq!(
                        expected_count,
                        target.chars().count(),
                        "wrong count for `{:?}.repeat({})` (padding: `{:?}`)",
                        tmpl_str,
                        repeat,
                        (pad_start.len(), pad_end.len()),
                    );
                }
            }
        }
    }
}

#[test]
fn floor_char_boundary() {
    fn check_many(s: &str, arg: impl IntoIterator<Item = usize>, ret: usize) {
        for idx in arg {
            assert_eq!(
                s.floor_char_boundary(idx),
                ret,
                "{:?}.floor_char_boundary({:?}) != {:?}",
                s,
                idx,
                ret
            );
        }
    }

    // edge case
    check_many("", [0, 1, isize::MAX as usize, usize::MAX], 0);

    // basic check
    check_many("x", [0], 0);
    check_many("x", [1, isize::MAX as usize, usize::MAX], 1);

    // 1-byte chars
    check_many("jp", [0], 0);
    check_many("jp", [1], 1);
    check_many("jp", 2..4, 2);

    // 2-byte chars
    check_many("ĵƥ", 0..2, 0);
    check_many("ĵƥ", 2..4, 2);
    check_many("ĵƥ", 4..6, 4);

    // 3-byte chars
    check_many("日本", 0..3, 0);
    check_many("日本", 3..6, 3);
    check_many("日本", 6..8, 6);

    // 4-byte chars
    check_many("🇯🇵", 0..4, 0);
    check_many("🇯🇵", 4..8, 4);
    check_many("🇯🇵", 8..10, 8);
}

#[test]
fn ceil_char_boundary() {
    fn check_many(s: &str, arg: impl IntoIterator<Item = usize>, ret: usize) {
        for idx in arg {
            assert_eq!(
                s.ceil_char_boundary(idx),
                ret,
                "{:?}.ceil_char_boundary({:?}) != {:?}",
                s,
                idx,
                ret
            );
        }
    }

    // edge case
    check_many("", [0], 0);

    // basic check
    check_many("x", [0], 0);
    check_many("x", [1], 1);

    // 1-byte chars
    check_many("jp", [0], 0);
    check_many("jp", [1], 1);
    check_many("jp", [2], 2);

    // 2-byte chars
    check_many("ĵƥ", 0..=0, 0);
    check_many("ĵƥ", 1..=2, 2);
    check_many("ĵƥ", 3..=4, 4);

    // 3-byte chars
    check_many("日本", 0..=0, 0);
    check_many("日本", 1..=3, 3);
    check_many("日本", 4..=6, 6);

    // 4-byte chars
    check_many("🇯🇵", 0..=0, 0);
    check_many("🇯🇵", 1..=4, 4);
    check_many("🇯🇵", 5..=8, 8);

    // above len
    check_many("hello", 5..=10, 5);
}
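
// Added sketch (not upstream): for any in-bounds index, the two helpers bracket
// it with valid char boundaries.
#[test]
fn char_boundary_bracketing() {
    let s = "日本jp🇯🇵";
    for idx in 0..=s.len() {
        let lo = s.floor_char_boundary(idx);
        let hi = s.ceil_char_boundary(idx);
        assert!(lo <= idx && idx <= hi);
        assert!(s.is_char_boundary(lo) && s.is_char_boundary(hi));
    }
}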