From 1b1ca6d5465ef4de12b1adf25cd4598f261c660d Mon Sep 17 00:00:00 2001 From: Daniel Micay Date: Fri, 25 Apr 2014 02:19:34 -0400 Subject: [PATCH 01/11] add back jemalloc to the tree This adds a `std::rt::heap` module with a nice allocator API. It's a step towards fixing #13094 and is a starting point for working on a generic allocator trait. The revision used for the jemalloc submodule is the stable 3.6.0 release. Closes #11807 --- .gitmodules | 3 ++ configure | 1 + mk/crates.mk | 2 +- mk/rt.mk | 52 +++++++++++++++++++++-- mk/tests.mk | 4 +- src/jemalloc | 1 + src/libstd/rt/heap.rs | 97 +++++++++++++++++++++++++++++++++++++++++++ src/libstd/rt/mod.rs | 5 ++- 8 files changed, 158 insertions(+), 7 deletions(-) create mode 160000 src/jemalloc create mode 100644 src/libstd/rt/heap.rs diff --git a/.gitmodules b/.gitmodules index f9da507b72a..37dbb30c82a 100644 --- a/.gitmodules +++ b/.gitmodules @@ -15,3 +15,6 @@ [submodule "src/rt/hoedown"] path = src/rt/hoedown url = https://github.com/rust-lang/hoedown.git +[submodule "src/jemalloc"] + path = src/jemalloc + url = https://github.com/rust-lang/jemalloc.git diff --git a/configure b/configure index d189c8cb6cd..3ab71f762f2 100755 --- a/configure +++ b/configure @@ -782,6 +782,7 @@ do for s in 0 1 2 3 do make_dir $t/rt/stage$s + make_dir $t/rt/jemalloc make_dir $t/rt/libuv make_dir $t/rt/libuv/src/ares make_dir $t/rt/libuv/src/eio diff --git a/mk/crates.mk b/mk/crates.mk index b75b5ba81e2..0437e08de28 100644 --- a/mk/crates.mk +++ b/mk/crates.mk @@ -57,7 +57,7 @@ CRATES := $(TARGET_CRATES) $(HOST_CRATES) TOOLS := compiletest rustdoc rustc DEPS_core := -DEPS_std := core libc native:rustrt native:compiler-rt native:backtrace +DEPS_std := core libc native:rustrt native:compiler-rt native:backtrace native:jemalloc DEPS_green := std rand native:context_switch DEPS_rustuv := std native:uv native:uv_support DEPS_native := std diff --git a/mk/rt.mk b/mk/rt.mk index df47f4a12d9..e4a548dd7bf 100644 --- a/mk/rt.mk +++ b/mk/rt.mk @@ -122,10 +122,13 @@ $(foreach lib,$(NATIVE_LIBS), \ ################################################################################ # Building third-party targets with external build systems # -# The only current member of this section is libuv, but long ago this used to -# also be occupied by jemalloc. This location is meant for dependencies which -# have external build systems. It is still assumed that the output of each of -# these steps is a static library in the correct location. +# This location is meant for dependencies which have external build systems. It +# is still assumed that the output of each of these steps is a static library +# in the correct location. +################################################################################ + +################################################################################ +# libuv ################################################################################ define DEF_LIBUV_ARCH_VAR @@ -154,6 +157,11 @@ define DEF_THIRD_PARTY_TARGETS ifeq ($$(CFG_WINDOWSY_$(1)), 1) LIBUV_OSTYPE_$(1) := win + # This isn't necessarily a desired option, but it's harmless and works around + # what appears to be a mingw-w64 bug. 
+ # + # https://sourceforge.net/p/mingw-w64/bugs/395/ + JEMALLOC_ARGS_$(1) := --enable-lazy-lock else ifeq ($(OSTYPE_$(1)), apple-darwin) LIBUV_OSTYPE_$(1) := mac else ifeq ($(OSTYPE_$(1)), unknown-freebsd) @@ -161,6 +169,7 @@ else ifeq ($(OSTYPE_$(1)), unknown-freebsd) else ifeq ($(OSTYPE_$(1)), linux-androideabi) LIBUV_OSTYPE_$(1) := android LIBUV_ARGS_$(1) := PLATFORM=android host=android OS=linux + JEMALLOC_ARGS_$(1) := --disable-tls else LIBUV_OSTYPE_$(1) := linux endif @@ -220,6 +229,41 @@ $$(LIBUV_DIR_$(1))/Release/libuv.a: $$(LIBUV_DEPS) $$(LIBUV_MAKEFILE_$(1)) \ endif +################################################################################ +# jemalloc +################################################################################ + +ifdef CFG_ENABLE_FAST_MAKE +JEMALLOC_DEPS := $(S)/.gitmodules +else +JEMALLOC_DEPS := $(wildcard \ + $(S)src/jemalloc/* \ + $(S)src/jemalloc/*/* \ + $(S)src/jemalloc/*/*/* \ + $(S)src/jemalloc/*/*/*/*) +endif + +JEMALLOC_NAME_$(1) := $$(call CFG_STATIC_LIB_NAME_$(1),jemalloc) +ifeq ($$(CFG_WINDOWSY_$(1)),1) + JEMALLOC_REAL_NAME_$(1) := $$(call CFG_STATIC_LIB_NAME_$(1),jemalloc_s) +else + JEMALLOC_REAL_NAME_$(1) := $$(call CFG_STATIC_LIB_NAME_$(1),jemalloc_pic) +endif +JEMALLOC_LIB_$(1) := $$(RT_OUTPUT_DIR_$(1))/$$(JEMALLOC_NAME_$(1)) +JEMALLOC_BUILD_DIR_$(1) := $$(RT_OUTPUT_DIR_$(1))/jemalloc + +$$(JEMALLOC_LIB_$(1)): $$(JEMALLOC_DEPS) $$(MKFILE_DEPS) + @$$(call E, make: jemalloc) + cd "$$(JEMALLOC_BUILD_DIR_$(1))"; "$(S)src/jemalloc/configure" \ + $$(JEMALLOC_ARGS_$(1)) --enable-cc-silence --with-jemalloc-prefix=je_ \ + --disable-experimental --build=$(CFG_BUILD) --host=$(1) \ + CC="$$(CC_$(1))" \ + AR="$$(AR_$(1))" \ + RANLIB="$$(AR_$(1)) s" \ + EXTRA_CFLAGS="$$(CFG_GCCISH_CFLAGS)" + $$(Q)$$(MAKE) -C "$$(JEMALLOC_BUILD_DIR_$(1))" build_lib_static + $$(Q)cp $$(JEMALLOC_BUILD_DIR_$(1))/lib/$$(JEMALLOC_REAL_NAME_$(1)) $$(JEMALLOC_LIB_$(1)) + ################################################################################ # compiler-rt ################################################################################ diff --git a/mk/tests.mk b/mk/tests.mk index 012ec0e862d..71d56d11a73 100644 --- a/mk/tests.mk +++ b/mk/tests.mk @@ -240,6 +240,7 @@ ALL_HS := $(filter-out $(S)src/rt/vg/valgrind.h \ tidy: @$(call E, check: formatting) $(Q)find $(S)src -name '*.r[sc]' \ + | grep '^$(S)src/jemalloc' -v \ | grep '^$(S)src/libuv' -v \ | grep '^$(S)src/llvm' -v \ | grep '^$(S)src/gyp' -v \ @@ -264,8 +265,9 @@ tidy: $(Q)find $(S)src -type f -perm +111 \ -not -name '*.rs' -and -not -name '*.py' \ -and -not -name '*.sh' \ - | grep '^$(S)src/llvm' -v \ + | grep '^$(S)src/jemalloc' -v \ | grep '^$(S)src/libuv' -v \ + | grep '^$(S)src/llvm' -v \ | grep '^$(S)src/rt/hoedown' -v \ | grep '^$(S)src/gyp' -v \ | grep '^$(S)src/etc' -v \ diff --git a/src/jemalloc b/src/jemalloc new file mode 160000 index 00000000000..6a96910f2ea --- /dev/null +++ b/src/jemalloc @@ -0,0 +1 @@ +Subproject commit 6a96910f2eaea6d2c705bb12379b23576b30d7d5 diff --git a/src/libstd/rt/heap.rs b/src/libstd/rt/heap.rs new file mode 100644 index 00000000000..b4b44fbf5c7 --- /dev/null +++ b/src/libstd/rt/heap.rs @@ -0,0 +1,97 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use intrinsics::{abort, cttz32}; +use libc::{c_int, c_void, size_t}; +use ptr::RawPtr; + +#[link(name = "jemalloc", kind = "static")] +extern { + fn je_mallocx(size: size_t, flags: c_int) -> *mut c_void; + fn je_rallocx(ptr: *mut c_void, size: size_t, flags: c_int) -> *mut c_void; + fn je_xallocx(ptr: *mut c_void, size: size_t, extra: size_t, flags: c_int) -> size_t; + fn je_dallocx(ptr: *mut c_void, flags: c_int); + fn je_nallocx(size: size_t, flags: c_int) -> size_t; +} + +// -lpthread needs to occur after -ljemalloc, the earlier argument isn't enough +#[cfg(not(windows))] +#[link(name = "pthread")] +extern {} + +// MALLOCX_ALIGN(a) macro +#[inline(always)] +fn mallocx_align(a: uint) -> c_int { unsafe { cttz32(a as u32) as c_int } } + +/// Return a pointer to `size` bytes of memory. +/// +/// Behavior is undefined if the requested size is 0 or the alignment is not a power of 2. The +/// alignment must be no larger than the largest supported page size on the platform. +#[inline] +pub unsafe fn allocate(size: uint, align: uint) -> *mut u8 { + let ptr = je_mallocx(size as size_t, mallocx_align(align)) as *mut u8; + if ptr.is_null() { + abort() + } + ptr +} + +/// Extend or shrink the allocation referenced by `ptr` to `size` bytes of memory. +/// +/// Behavior is undefined if the requested size is 0 or the alignment is not a power of 2. The +/// alignment must be no larger than the largest supported page size on the platform. +/// +/// The `old_size` and `align` parameters are the parameters that were used to create the +/// allocation referenced by `ptr`. The `old_size` parameter may also be the value returned by +/// `usable_size` for the requested size. +#[inline] +#[allow(unused_variable)] // for the parameter names in the documentation +pub unsafe fn reallocate(ptr: *mut u8, size: uint, align: uint, old_size: uint) -> *mut u8 { + let ptr = je_rallocx(ptr as *mut c_void, size as size_t, mallocx_align(align)) as *mut u8; + if ptr.is_null() { + abort() + } + ptr +} + +/// Extend or shrink the allocation referenced by `ptr` to `size` bytes of memory in-place. +/// +/// Return true if successful, otherwise false if the allocation was not altered. +/// +/// Behavior is undefined if the requested size is 0 or the alignment is not a power of 2. The +/// alignment must be no larger than the largest supported page size on the platform. +/// +/// The `old_size` and `align` parameters are the parameters that were used to +/// create the allocation referenced by `ptr`. The `old_size` parameter may be +/// any value in range_inclusive(requested_size, usable_size). +#[inline] +#[allow(unused_variable)] // for the parameter names in the documentation +pub unsafe fn reallocate_inplace(ptr: *mut u8, size: uint, align: uint, old_size: uint) -> bool { + je_xallocx(ptr as *mut c_void, size as size_t, 0, mallocx_align(align)) == size as size_t +} + +/// Deallocate the memory referenced by `ptr`. +/// +/// The `ptr` parameter must not be null. +/// +/// The `size` and `align` parameters are the parameters that were used to create the +/// allocation referenced by `ptr`. The `size` parameter may also be the value returned by +/// `usable_size` for the requested size. +#[inline] +#[allow(unused_variable)] // for the parameter names in the documentation +pub unsafe fn deallocate(ptr: *mut u8, size: uint, align: uint) { + je_dallocx(ptr as *mut c_void, mallocx_align(align)) +} + +/// Return the usable size of an allocation created with the specified the `size` and `align`. 
+#[inline] +pub fn usable_size(size: uint, align: uint) -> uint { + unsafe { je_nallocx(size as size_t, mallocx_align(align)) as uint } +} diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs index 5b9c314d42b..904921cfa18 100644 --- a/src/libstd/rt/mod.rs +++ b/src/libstd/rt/mod.rs @@ -89,7 +89,10 @@ mod macros; // The global (exchange) heap. pub mod global_heap; -// Implementations of language-critical runtime features like @. +/// The low-level memory allocation API. +pub mod heap; + +/// Implementations of language-critical runtime features like @. pub mod task; // The EventLoop and internal synchronous I/O interface. From 03a5eb4b5295ada37e1e42ad1299857fcb8e81e9 Mon Sep 17 00:00:00 2001 From: Daniel Micay Date: Fri, 25 Apr 2014 21:24:51 -0400 Subject: [PATCH 02/11] add an align parameter to exchange_malloc Closes #13094 --- src/libarena/lib.rs | 31 ++++++++++++++++++--- src/libcore/should_not_exist.rs | 26 ++++++++++++++---- src/librustc/middle/trans/base.rs | 5 ++-- src/librustc/middle/trans/expr.rs | 5 ++-- src/librustc/middle/trans/tvec.rs | 4 ++- src/libstd/rt/global_heap.rs | 45 ++++++++++++++++++++++++++++++- 6 files changed, 101 insertions(+), 15 deletions(-) diff --git a/src/libarena/lib.rs b/src/libarena/lib.rs index 24e7a65e02a..ec9d4eaed9e 100644 --- a/src/libarena/lib.rs +++ b/src/libarena/lib.rs @@ -33,6 +33,7 @@ use std::cmp; use std::intrinsics::{TyDesc, get_tydesc}; use std::intrinsics; use std::mem; +use std::mem::min_align_of; use std::num; use std::ptr::read; use std::rc::Rc; @@ -204,7 +205,7 @@ impl Arena { #[inline] fn alloc_copy<'a, T>(&'a mut self, op: || -> T) -> &'a T { unsafe { - let ptr = self.alloc_copy_inner(mem::size_of::(), mem::min_align_of::()); + let ptr = self.alloc_copy_inner(mem::size_of::(), min_align_of::()); let ptr: *mut T = transmute(ptr); mem::move_val_init(&mut (*ptr), op()); return transmute(ptr); @@ -261,7 +262,7 @@ impl Arena { unsafe { let tydesc = get_tydesc::(); let (ty_ptr, ptr) = - self.alloc_noncopy_inner(mem::size_of::(), mem::min_align_of::()); + self.alloc_noncopy_inner(mem::size_of::(), min_align_of::()); let ty_ptr: *mut uint = transmute(ty_ptr); let ptr: *mut T = transmute(ptr); // Write in our tydesc along with a bit indicating that it @@ -353,7 +354,29 @@ struct TypedArenaChunk { } impl TypedArenaChunk { + #[cfg(stage0)] #[inline] + fn new(next: Option>>, capacity: uint) + -> Box> { + let mut size = mem::size_of::>(); + size = round_up(size, min_align_of::()); + let elem_size = mem::size_of::(); + let elems_size = elem_size.checked_mul(&capacity).unwrap(); + size = size.checked_add(&elems_size).unwrap(); + + let mut chunk = unsafe { + let chunk = global_heap::exchange_malloc(size); + let mut chunk: Box> = cast::transmute(chunk); + mem::move_val_init(&mut chunk.next, next); + chunk + }; + + chunk.capacity = capacity; + chunk + } + + #[inline] + #[cfg(not(stage0))] fn new(next: Option>>, capacity: uint) -> Box> { let mut size = mem::size_of::>(); @@ -363,7 +386,7 @@ impl TypedArenaChunk { size = size.checked_add(&elems_size).unwrap(); let mut chunk = unsafe { - let chunk = global_heap::exchange_malloc(size); + let chunk = global_heap::exchange_malloc(size, min_align_of::>()); let mut chunk: Box> = cast::transmute(chunk); mem::move_val_init(&mut chunk.next, next); chunk @@ -402,7 +425,7 @@ impl TypedArenaChunk { fn start(&self) -> *u8 { let this: *TypedArenaChunk = self; unsafe { - cast::transmute(round_up(this.offset(1) as uint, mem::min_align_of::())) + cast::transmute(round_up(this.offset(1) as uint, min_align_of::())) } 
} diff --git a/src/libcore/should_not_exist.rs b/src/libcore/should_not_exist.rs index 0e6baaa518a..74bebc921e3 100644 --- a/src/libcore/should_not_exist.rs +++ b/src/libcore/should_not_exist.rs @@ -29,13 +29,29 @@ use str::StrSlice; #[allow(ctypes)] extern { - fn malloc(size: uint) -> *u8; - fn free(ptr: *u8); + #[cfg(stage0)] + fn rust_malloc(size: uint) -> *u8; + #[cfg(not(stage0))] + fn rust_malloc(size: uint, align: uint) -> *u8; + fn rust_free(ptr: *u8); } +#[cfg(stage0)] unsafe fn alloc(cap: uint) -> *mut Vec<()> { let cap = cap.checked_add(&mem::size_of::>()).unwrap(); - let ret = malloc(cap) as *mut Vec<()>; + let ret = rust_malloc(cap) as *mut Vec<()>; + if ret.is_null() { + intrinsics::abort(); + } + (*ret).fill = 0; + (*ret).alloc = cap; + ret +} + +#[cfg(not(stage0))] +unsafe fn alloc(cap: uint) -> *mut Vec<()> { + let cap = cap.checked_add(&mem::size_of::>()).unwrap(); + let ret = rust_malloc(cap, 8) as *mut Vec<()>; if ret.is_null() { intrinsics::abort(); } @@ -102,7 +118,7 @@ impl FromIterator for ~str { ptr::copy_nonoverlapping_memory(&mut (*ptr2).data, &(*ptr).data, len); - free(ptr as *u8); + rust_free(ptr as *u8); cast::forget(ret); ret = cast::transmute(ptr2); ptr = ptr2; @@ -172,7 +188,7 @@ impl Clone for ~[A] { for j in range(0, *i as int) { ptr::read(&*p.offset(j)); } - free(ret as *u8); + rust_free(ret as *u8); }); cast::transmute(ret) } diff --git a/src/librustc/middle/trans/base.rs b/src/librustc/middle/trans/base.rs index 6c4566c09aa..8f878f099fc 100644 --- a/src/librustc/middle/trans/base.rs +++ b/src/librustc/middle/trans/base.rs @@ -345,7 +345,8 @@ fn require_alloc_fn(bcx: &Block, info_ty: ty::t, it: LangItem) -> ast::DefId { pub fn malloc_raw_dyn<'a>(bcx: &'a Block<'a>, ptr_ty: ty::t, - size: ValueRef) + size: ValueRef, + align: ValueRef) -> Result<'a> { let _icx = push_ctxt("malloc_raw_exchange"); let ccx = bcx.ccx(); @@ -353,7 +354,7 @@ pub fn malloc_raw_dyn<'a>(bcx: &'a Block<'a>, // Allocate space: let r = callee::trans_lang_call(bcx, require_alloc_fn(bcx, ptr_ty, ExchangeMallocFnLangItem), - [size], + [size, align], None); let llty_ptr = type_of::type_of(ccx, ptr_ty); diff --git a/src/librustc/middle/trans/expr.rs b/src/librustc/middle/trans/expr.rs index 806fb3e125d..463aa67ac90 100644 --- a/src/librustc/middle/trans/expr.rs +++ b/src/librustc/middle/trans/expr.rs @@ -67,7 +67,7 @@ use middle::typeck::MethodCall; use util::common::indenter; use util::ppaux::Repr; use util::nodemap::NodeMap; -use middle::trans::machine::{llsize_of, llsize_of_alloc}; +use middle::trans::machine::{llalign_of_min, llsize_of, llsize_of_alloc}; use middle::trans::type_::Type; use syntax::ast; @@ -1170,10 +1170,11 @@ fn trans_uniq_expr<'a>(bcx: &'a Block<'a>, let fcx = bcx.fcx; let llty = type_of::type_of(bcx.ccx(), contents_ty); let size = llsize_of(bcx.ccx(), llty); + let align = C_uint(bcx.ccx(), llalign_of_min(bcx.ccx(), llty) as uint); // We need to a make a pointer type because box_ty is ty_bot // if content_ty is, e.g. box fail!(). let real_box_ty = ty::mk_uniq(bcx.tcx(), contents_ty); - let Result { bcx, val } = malloc_raw_dyn(bcx, real_box_ty, size); + let Result { bcx, val } = malloc_raw_dyn(bcx, real_box_ty, size, align); // Unique boxes do not allocate for zero-size types. The standard library // may assume that `free` is never called on the pointer returned for // `Box`. 
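For orientation, the net effect of this change on unique-box allocation: the compiler now computes both the size and the minimal alignment of the boxed type and threads them through the `exchange_malloc` lang item (see `malloc_raw_dyn` and `trans_uniq_expr` above). A simplified sketch in era Rust of what `box value` conceptually expands to after this patch; `box_new` is a hypothetical helper, not code from the series:

    use std::mem;
    use std::rt::global_heap::exchange_malloc;

    // Illustrative only: the real lowering is done in trans via malloc_raw_dyn.
    unsafe fn box_new<T>(value: T) -> *mut T {
        let size = mem::size_of::<T>();
        let align = mem::min_align_of::<T>();   // new: alignment is passed along
        let p = exchange_malloc(size, align) as *mut T;
        // zero-size types get the shared EMPTY pointer, see exchange_malloc below
        mem::move_val_init(&mut *p, value);
        p
    }
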
diff --git a/src/librustc/middle/trans/tvec.rs b/src/librustc/middle/trans/tvec.rs index e210437a370..48179abc052 100644 --- a/src/librustc/middle/trans/tvec.rs +++ b/src/librustc/middle/trans/tvec.rs @@ -278,7 +278,9 @@ pub fn trans_uniq_vstore<'a>(bcx: &'a Block<'a>, let vecsize = Add(bcx, alloc, llsize_of(ccx, ccx.opaque_vec_type)); - let Result { bcx: bcx, val: val } = malloc_raw_dyn(bcx, vec_ty, vecsize); + // ~[T] is not going to be changed to support alignment, since it's obsolete. + let align = C_uint(ccx, 8); + let Result { bcx: bcx, val: val } = malloc_raw_dyn(bcx, vec_ty, vecsize, align); Store(bcx, fill, GEPi(bcx, val, [0u, abi::vec_elt_fill])); Store(bcx, alloc, GEPi(bcx, val, [0u, abi::vec_elt_alloc])); diff --git a/src/libstd/rt/global_heap.rs b/src/libstd/rt/global_heap.rs index 7d54c3faf42..c8808b6e821 100644 --- a/src/libstd/rt/global_heap.rs +++ b/src/libstd/rt/global_heap.rs @@ -68,7 +68,7 @@ pub unsafe fn realloc_raw(ptr: *mut u8, size: uint) -> *mut u8 { } /// The allocator for unique pointers without contained managed pointers. -#[cfg(not(test))] +#[cfg(not(test), stage0)] #[lang="exchange_malloc"] #[inline] pub unsafe fn exchange_malloc(size: uint) -> *mut u8 { @@ -85,6 +85,23 @@ pub unsafe fn exchange_malloc(size: uint) -> *mut u8 { } } +/// The allocator for unique pointers without contained managed pointers. +#[cfg(not(test), not(stage0))] +#[lang="exchange_malloc"] +#[inline] +pub unsafe fn exchange_malloc(size: uint, _align: uint) -> *mut u8 { + // The compiler never calls `exchange_free` on ~ZeroSizeType, so zero-size + // allocations can point to this `static`. It would be incorrect to use a null + // pointer, due to enums assuming types like unique pointers are never null. + static EMPTY: () = (); + + if size == 0 { + &EMPTY as *() as *mut u8 + } else { + malloc_raw(size) + } +} + // FIXME: #7496 #[cfg(not(test))] #[lang="closure_exchange_malloc"] @@ -118,6 +135,32 @@ pub unsafe fn exchange_free(ptr: *u8) { free(ptr as *mut c_void); } +// hack for libcore +#[no_mangle] +#[doc(hidden)] +#[deprecated] +#[cfg(stage0)] +pub extern "C" fn rust_malloc(size: uint) -> *mut u8 { + unsafe { exchange_malloc(size) } +} + +// hack for libcore +#[no_mangle] +#[doc(hidden)] +#[deprecated] +#[cfg(not(stage0))] +pub extern "C" fn rust_malloc(size: uint, align: uint) -> *mut u8 { + unsafe { exchange_malloc(size, align) } +} + +// hack for libcore +#[no_mangle] +#[doc(hidden)] +#[deprecated] +pub extern "C" fn rust_free(ptr: *u8) { + unsafe { exchange_free(ptr) } +} + #[cfg(test)] mod bench { extern crate test; From aaf6e06b01c4f7490e71693d3c96f466032e80d0 Mon Sep 17 00:00:00 2001 From: Daniel Micay Date: Tue, 6 May 2014 17:01:16 -0400 Subject: [PATCH 03/11] use jemalloc to implement Vec --- src/libstd/rt/heap.rs | 3 ++ src/libstd/vec.rs | 73 +++++++++++++++++++++++++++---------------- 2 files changed, 49 insertions(+), 27 deletions(-) diff --git a/src/libstd/rt/heap.rs b/src/libstd/rt/heap.rs index b4b44fbf5c7..1b4b1e444fe 100644 --- a/src/libstd/rt/heap.rs +++ b/src/libstd/rt/heap.rs @@ -8,6 +8,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+// FIXME: #13994: port to the sized deallocation API when available +// FIXME: #13996: need a way to mark the `allocate` and `reallocate` return values as `noalias` + use intrinsics::{abort, cttz32}; use libc::{c_int, c_void, size_t}; use ptr::RawPtr; diff --git a/src/libstd/vec.rs b/src/libstd/vec.rs index da01da26709..28035c32f8e 100644 --- a/src/libstd/vec.rs +++ b/src/libstd/vec.rs @@ -12,13 +12,12 @@ use cast::{forget, transmute}; use clone::Clone; -use cmp::{Ord, Eq, Ordering, TotalEq, TotalOrd}; +use cmp::{Ord, Eq, Ordering, TotalEq, TotalOrd, max}; use container::{Container, Mutable}; use default::Default; use fmt; use iter::{DoubleEndedIterator, FromIterator, Extendable, Iterator, range}; -use libc::{free, c_void}; -use mem::{size_of, move_val_init}; +use mem::{min_align_of, move_val_init, size_of}; use mem; use num; use num::{CheckedMul, CheckedAdd}; @@ -26,9 +25,9 @@ use ops::{Add, Drop}; use option::{None, Option, Some, Expect}; use ptr::RawPtr; use ptr; -use rt::global_heap::{malloc_raw, realloc_raw}; use raw::Slice; use RawVec = raw::Vec; +use rt::heap::{allocate, reallocate, deallocate}; use slice::{ImmutableEqVector, ImmutableVector, Items, MutItems, MutableVector}; use slice::{MutableTotalOrdVector, OwnedVector, Vector}; use slice::{MutableVectorAllocating}; @@ -96,7 +95,7 @@ impl Vec { Vec::new() } else { let size = capacity.checked_mul(&size_of::()).expect("capacity overflow"); - let ptr = unsafe { malloc_raw(size) }; + let ptr = unsafe { allocate(size, min_align_of::()) }; Vec { len: 0, cap: capacity, ptr: ptr as *mut T } } } @@ -401,6 +400,16 @@ impl Container for Vec { } } +// FIXME: #13996: need a way to mark the return value as `noalias` +#[inline(never)] +unsafe fn alloc_or_realloc(ptr: *mut u8, size: uint, align: uint, old_size: uint) -> *mut u8 { + if old_size == 0 { + allocate(size, align) + } else { + reallocate(ptr, size, align, old_size) + } +} + impl Vec { /// Returns the number of elements the vector can hold without /// reallocating. @@ -479,31 +488,35 @@ impl Vec { pub fn reserve_exact(&mut self, capacity: uint) { if capacity > self.cap { let size = capacity.checked_mul(&size_of::()).expect("capacity overflow"); - self.cap = capacity; unsafe { - self.ptr = realloc_raw(self.ptr as *mut u8, size) as *mut T; + self.ptr = alloc_or_realloc(self.ptr as *mut u8, size, min_align_of::(), + self.cap * size_of::()) as *mut T; } + self.cap = capacity; } } - /// Shrink the capacity of the vector to match the length + /// Shrink the capacity of the vector as much as possible /// /// # Example /// /// ```rust /// let mut vec = vec!(1, 2, 3); /// vec.shrink_to_fit(); - /// assert_eq!(vec.capacity(), vec.len()); /// ``` pub fn shrink_to_fit(&mut self) { if self.len == 0 { - unsafe { free(self.ptr as *mut c_void) }; - self.cap = 0; - self.ptr = 0 as *mut T; + if self.cap != 0 { + unsafe { + deallocate(self.ptr as *mut u8, self.cap * size_of::(), min_align_of::()) + } + self.cap = 0; + } } else { unsafe { // Overflow check is unnecessary as the vector is already at least this large. 
- self.ptr = realloc_raw(self.ptr as *mut u8, self.len * size_of::()) as *mut T; + self.ptr = reallocate(self.ptr as *mut u8, self.len * size_of::(), + min_align_of::(), self.cap * size_of::()) as *mut T; } self.cap = self.len; } @@ -547,14 +560,14 @@ impl Vec { #[inline] pub fn push(&mut self, value: T) { if self.len == self.cap { - if self.cap == 0 { self.cap += 2 } let old_size = self.cap * size_of::(); - self.cap = self.cap * 2; - let size = old_size * 2; + let size = max(old_size, 2 * size_of::()) * 2; if old_size > size { fail!("capacity overflow") } unsafe { - self.ptr = realloc_raw(self.ptr as *mut u8, size) as *mut T; + self.ptr = alloc_or_realloc(self.ptr as *mut u8, size, min_align_of::(), + self.cap * size_of::()) as *mut u8 as *mut T; } + self.cap = max(self.cap, 2) * 2; } unsafe { @@ -638,9 +651,10 @@ impl Vec { pub fn move_iter(self) -> MoveItems { unsafe { let iter = transmute(self.as_slice().iter()); - let ptr = self.ptr as *mut c_void; + let ptr = self.ptr as *mut u8; + let cap = self.cap; forget(self); - MoveItems { allocation: ptr, iter: iter } + MoveItems { allocation: ptr, cap: cap, iter: iter } } } @@ -1386,11 +1400,13 @@ impl Drop for Vec { fn drop(&mut self) { // This is (and should always remain) a no-op if the fields are // zeroed (when moving out, because of #[unsafe_no_drop_flag]). - unsafe { - for x in self.as_mut_slice().iter() { - ptr::read(x); + if self.cap != 0 { + unsafe { + for x in self.as_mut_slice().iter() { + ptr::read(x); + } + deallocate(self.ptr as *mut u8, self.cap * size_of::(), min_align_of::()) } - free(self.ptr as *mut c_void) } } } @@ -1409,7 +1425,8 @@ impl fmt::Show for Vec { /// An iterator that moves out of a vector. pub struct MoveItems { - allocation: *mut c_void, // the block of memory allocated for the vector + allocation: *mut u8, // the block of memory allocated for the vector + cap: uint, // the capacity of the vector iter: Items<'static, T> } @@ -1440,9 +1457,11 @@ impl DoubleEndedIterator for MoveItems { impl Drop for MoveItems { fn drop(&mut self) { // destroy the remaining elements - for _x in *self {} - unsafe { - free(self.allocation) + if self.cap != 0 { + for _x in *self {} + unsafe { + deallocate(self.allocation, self.cap * size_of::(), min_align_of::()) + } } } } From 138437956c9ab78aede9bb698aa80f9367b3b75a Mon Sep 17 00:00:00 2001 From: Daniel Micay Date: Tue, 6 May 2014 22:03:14 -0400 Subject: [PATCH 04/11] initial port of the exchange allocator to jemalloc In stage0, all allocations are 8-byte aligned. Passing a size and alignment to free is not yet implemented everywhere (0 size and 8 align are used as placeholders). Fixing this is part of #13994. Closes #13616 --- src/libarena/lib.rs | 6 +- src/libcore/should_not_exist.rs | 8 +- src/libstd/lib.rs | 1 + src/libstd/rc.rs | 9 ++- src/libstd/rt/global_heap.rs | 131 -------------------------------- src/libstd/rt/heap.rs | 122 +++++++++++++++++++++++++++++ src/libstd/rt/local_heap.rs | 5 +- src/libstd/rt/util.rs | 17 +++++ src/libstd/slice.rs | 60 +++++++++++++-- src/libstd/vec.rs | 2 +- src/libsync/arc.rs | 9 ++- 11 files changed, 219 insertions(+), 151 deletions(-) diff --git a/src/libarena/lib.rs b/src/libarena/lib.rs index ec9d4eaed9e..526b72e9b86 100644 --- a/src/libarena/lib.rs +++ b/src/libarena/lib.rs @@ -37,7 +37,7 @@ use std::mem::min_align_of; use std::num; use std::ptr::read; use std::rc::Rc; -use std::rt::global_heap; +use std::rt::heap::exchange_malloc; // The way arena uses arrays is really deeply awful. 
The arrays are // allocated, and have capacities reserved, but the fill for the array @@ -365,7 +365,7 @@ impl TypedArenaChunk { size = size.checked_add(&elems_size).unwrap(); let mut chunk = unsafe { - let chunk = global_heap::exchange_malloc(size); + let chunk = exchange_malloc(size); let mut chunk: Box> = cast::transmute(chunk); mem::move_val_init(&mut chunk.next, next); chunk @@ -386,7 +386,7 @@ impl TypedArenaChunk { size = size.checked_add(&elems_size).unwrap(); let mut chunk = unsafe { - let chunk = global_heap::exchange_malloc(size, min_align_of::>()); + let chunk = exchange_malloc(size, min_align_of::>()); let mut chunk: Box> = cast::transmute(chunk); mem::move_val_init(&mut chunk.next, next); chunk diff --git a/src/libcore/should_not_exist.rs b/src/libcore/should_not_exist.rs index 74bebc921e3..f199aa051d1 100644 --- a/src/libcore/should_not_exist.rs +++ b/src/libcore/should_not_exist.rs @@ -33,7 +33,7 @@ extern { fn rust_malloc(size: uint) -> *u8; #[cfg(not(stage0))] fn rust_malloc(size: uint, align: uint) -> *u8; - fn rust_free(ptr: *u8); + fn rust_free(ptr: *u8, size: uint, align: uint); } #[cfg(stage0)] @@ -51,6 +51,7 @@ unsafe fn alloc(cap: uint) -> *mut Vec<()> { #[cfg(not(stage0))] unsafe fn alloc(cap: uint) -> *mut Vec<()> { let cap = cap.checked_add(&mem::size_of::>()).unwrap(); + // this should use the real alignment, but the new representation will take care of that let ret = rust_malloc(cap, 8) as *mut Vec<()>; if ret.is_null() { intrinsics::abort(); @@ -118,7 +119,8 @@ impl FromIterator for ~str { ptr::copy_nonoverlapping_memory(&mut (*ptr2).data, &(*ptr).data, len); - rust_free(ptr as *u8); + // FIXME: #13994: port to the sized deallocation API when available + rust_free(ptr as *u8, 0, 8); cast::forget(ret); ret = cast::transmute(ptr2); ptr = ptr2; @@ -188,7 +190,7 @@ impl Clone for ~[A] { for j in range(0, *i as int) { ptr::read(&*p.offset(j)); } - rust_free(ret as *u8); + rust_free(ret as *u8, 0, 8); }); cast::transmute(ret) } diff --git a/src/libstd/lib.rs b/src/libstd/lib.rs index 0ac6f1dba4f..3f22a76c1f4 100644 --- a/src/libstd/lib.rs +++ b/src/libstd/lib.rs @@ -110,6 +110,7 @@ // Don't link to std. We are std. #![no_std] +#![allow(deprecated)] #![deny(missing_doc)] // When testing libstd, bring in libuv as the I/O backend so tests can print diff --git a/src/libstd/rc.rs b/src/libstd/rc.rs index 51ab885a85f..e0fe75fd907 100644 --- a/src/libstd/rc.rs +++ b/src/libstd/rc.rs @@ -32,7 +32,8 @@ use ops::{Deref, Drop}; use option::{Option, Some, None}; use ptr; use ptr::RawPtr; -use rt::global_heap::exchange_free; +use mem::{min_align_of, size_of}; +use rt::heap::exchange_free; struct RcBox { value: T, @@ -104,7 +105,8 @@ impl Drop for Rc { self.dec_weak(); if self.weak() == 0 { - exchange_free(self.ptr as *u8) + exchange_free(self.ptr as *mut u8, size_of::>(), + min_align_of::>()) } } } @@ -177,7 +179,8 @@ impl Drop for Weak { // the weak count starts at 1, and will only go to // zero if all the strong pointers have disappeared. 
if self.weak() == 0 { - exchange_free(self.ptr as *u8) + exchange_free(self.ptr as *mut u8, size_of::>(), + min_align_of::>()) } } } diff --git a/src/libstd/rt/global_heap.rs b/src/libstd/rt/global_heap.rs index c8808b6e821..ece51ab9989 100644 --- a/src/libstd/rt/global_heap.rs +++ b/src/libstd/rt/global_heap.rs @@ -14,23 +14,6 @@ use libc::{c_void, size_t, free, malloc, realloc}; use ptr::{RawPtr, mut_null}; use intrinsics::abort; -use raw; -use mem::size_of; - -#[inline] -pub fn get_box_size(body_size: uint, body_align: uint) -> uint { - let header_size = size_of::>(); - let total_size = align_to(header_size, body_align) + body_size; - total_size -} - -// Rounds |size| to the nearest |alignment|. Invariant: |alignment| is a power -// of two. -#[inline] -fn align_to(size: uint, align: uint) -> uint { - assert!(align != 0); - (size + align - 1) & !(align - 1) -} /// A wrapper around libc::malloc, aborting on out-of-memory #[inline] @@ -66,117 +49,3 @@ pub unsafe fn realloc_raw(ptr: *mut u8, size: uint) -> *mut u8 { p as *mut u8 } } - -/// The allocator for unique pointers without contained managed pointers. -#[cfg(not(test), stage0)] -#[lang="exchange_malloc"] -#[inline] -pub unsafe fn exchange_malloc(size: uint) -> *mut u8 { - // The compiler never calls `exchange_free` on Box, so - // zero-size allocations can point to this `static`. It would be incorrect - // to use a null pointer, due to enums assuming types like unique pointers - // are never null. - static EMPTY: () = (); - - if size == 0 { - &EMPTY as *() as *mut u8 - } else { - malloc_raw(size) - } -} - -/// The allocator for unique pointers without contained managed pointers. -#[cfg(not(test), not(stage0))] -#[lang="exchange_malloc"] -#[inline] -pub unsafe fn exchange_malloc(size: uint, _align: uint) -> *mut u8 { - // The compiler never calls `exchange_free` on ~ZeroSizeType, so zero-size - // allocations can point to this `static`. It would be incorrect to use a null - // pointer, due to enums assuming types like unique pointers are never null. - static EMPTY: () = (); - - if size == 0 { - &EMPTY as *() as *mut u8 - } else { - malloc_raw(size) - } -} - -// FIXME: #7496 -#[cfg(not(test))] -#[lang="closure_exchange_malloc"] -#[inline] -pub unsafe fn closure_exchange_malloc_(drop_glue: fn(*mut u8), size: uint, align: uint) -> *u8 { - closure_exchange_malloc(drop_glue, size, align) -} - -#[inline] -pub unsafe fn closure_exchange_malloc(drop_glue: fn(*mut u8), size: uint, align: uint) -> *u8 { - let total_size = get_box_size(size, align); - let p = malloc_raw(total_size); - - let alloc = p as *mut raw::Box<()>; - (*alloc).drop_glue = drop_glue; - - alloc as *u8 -} - -// NB: Calls to free CANNOT be allowed to fail, as throwing an exception from -// inside a landing pad may corrupt the state of the exception handler. 
-#[cfg(not(test))] -#[lang="exchange_free"] -#[inline] -pub unsafe fn exchange_free_(ptr: *u8) { - exchange_free(ptr) -} - -#[inline] -pub unsafe fn exchange_free(ptr: *u8) { - free(ptr as *mut c_void); -} - -// hack for libcore -#[no_mangle] -#[doc(hidden)] -#[deprecated] -#[cfg(stage0)] -pub extern "C" fn rust_malloc(size: uint) -> *mut u8 { - unsafe { exchange_malloc(size) } -} - -// hack for libcore -#[no_mangle] -#[doc(hidden)] -#[deprecated] -#[cfg(not(stage0))] -pub extern "C" fn rust_malloc(size: uint, align: uint) -> *mut u8 { - unsafe { exchange_malloc(size, align) } -} - -// hack for libcore -#[no_mangle] -#[doc(hidden)] -#[deprecated] -pub extern "C" fn rust_free(ptr: *u8) { - unsafe { exchange_free(ptr) } -} - -#[cfg(test)] -mod bench { - extern crate test; - use self::test::Bencher; - - #[bench] - fn alloc_owned_small(b: &mut Bencher) { - b.iter(|| { - box 10 - }) - } - - #[bench] - fn alloc_owned_big(b: &mut Bencher) { - b.iter(|| { - box [10, ..1000] - }) - } -} diff --git a/src/libstd/rt/heap.rs b/src/libstd/rt/heap.rs index 1b4b1e444fe..30583f76330 100644 --- a/src/libstd/rt/heap.rs +++ b/src/libstd/rt/heap.rs @@ -98,3 +98,125 @@ pub unsafe fn deallocate(ptr: *mut u8, size: uint, align: uint) { pub fn usable_size(size: uint, align: uint) -> uint { unsafe { je_nallocx(size as size_t, mallocx_align(align)) as uint } } + +/// The allocator for unique pointers. +#[cfg(stage0)] +#[lang="exchange_malloc"] +#[inline(always)] +pub unsafe fn exchange_malloc_(size: uint) -> *mut u8 { + exchange_malloc(size) +} + +/// The allocator for unique pointers. +#[cfg(not(test), not(stage0))] +#[lang="exchange_malloc"] +#[inline(always)] +pub unsafe fn exchange_malloc_(size: uint, align: uint) -> *mut u8 { + exchange_malloc(size, align) +} + +/// The allocator for unique pointers. +#[cfg(stage0)] +#[inline] +pub unsafe fn exchange_malloc(size: uint) -> *mut u8 { + // The compiler never calls `exchange_free` on ~ZeroSizeType, so zero-size + // allocations can point to this `static`. It would be incorrect to use a null + // pointer, due to enums assuming types like unique pointers are never null. + static EMPTY: () = (); + + if size == 0 { + &EMPTY as *() as *mut u8 + } else { + allocate(size, 8) + } +} + +/// The allocator for unique pointers. +#[cfg(not(stage0))] +#[inline] +pub unsafe fn exchange_malloc(size: uint, align: uint) -> *mut u8 { + // The compiler never calls `exchange_free` on ~ZeroSizeType, so zero-size + // allocations can point to this `static`. It would be incorrect to use a null + // pointer, due to enums assuming types like unique pointers are never null. 
+ static EMPTY: () = (); + + if size == 0 { + &EMPTY as *() as *mut u8 + } else { + allocate(size, align) + } +} + +#[cfg(not(test))] +#[lang="exchange_free"] +#[inline] +// FIXME: #13994 (rustc should pass align and size here) +pub unsafe fn exchange_free_(ptr: *mut u8) { + exchange_free(ptr, 0, 8) +} + +#[inline] +pub unsafe fn exchange_free(ptr: *mut u8, size: uint, align: uint) { + deallocate(ptr, size, align); +} + +// FIXME: #7496 +#[cfg(not(test))] +#[lang="closure_exchange_malloc"] +#[inline] +unsafe fn closure_exchange_malloc(drop_glue: fn(*mut u8), size: uint, align: uint) -> *mut u8 { + let total_size = ::rt::util::get_box_size(size, align); + let p = allocate(total_size, 8); + + let alloc = p as *mut ::raw::Box<()>; + (*alloc).drop_glue = drop_glue; + + alloc as *mut u8 +} + +// hack for libcore +#[no_mangle] +#[doc(hidden)] +#[deprecated] +#[cfg(stage0, not(test))] +pub extern "C" fn rust_malloc(size: uint) -> *mut u8 { + unsafe { exchange_malloc(size) } +} + +// hack for libcore +#[no_mangle] +#[doc(hidden)] +#[deprecated] +#[cfg(not(stage0), not(test))] +pub extern "C" fn rust_malloc(size: uint, align: uint) -> *mut u8 { + unsafe { exchange_malloc(size, align) } +} + +// hack for libcore +#[no_mangle] +#[doc(hidden)] +#[deprecated] +#[cfg(not(test))] +pub extern "C" fn rust_free(ptr: *mut u8, size: uint, align: uint) { + unsafe { exchange_free(ptr, size, align) } +} + +#[cfg(test)] +mod bench { + extern crate test; + use self::test::Bencher; + + #[bench] + fn alloc_owned_small(b: &mut Bencher) { + b.iter(|| { + box 10 + }) + } + + #[bench] + fn alloc_owned_big(b: &mut Bencher) { + b.iter(|| { + box [10, ..1000] + }) + } +} diff --git a/src/libstd/rt/local_heap.rs b/src/libstd/rt/local_heap.rs index caf0d9028c5..8795736b3f5 100644 --- a/src/libstd/rt/local_heap.rs +++ b/src/libstd/rt/local_heap.rs @@ -12,6 +12,7 @@ use cast; use iter::Iterator; +use libc::{c_void, free}; use mem; use ops::Drop; use option::{Option, None, Some}; @@ -58,7 +59,7 @@ impl LocalHeap { #[inline] pub fn alloc(&mut self, drop_glue: fn(*mut u8), size: uint, align: uint) -> *mut Box { - let total_size = global_heap::get_box_size(size, align); + let total_size = ::rt::util::get_box_size(size, align); let alloc = self.memory_region.malloc(total_size); { // Make sure that we can't use `mybox` outside of this scope @@ -226,7 +227,7 @@ impl MemoryRegion { self.release(cast::transmute(alloc)); rtassert!(self.live_allocations > 0); self.live_allocations -= 1; - global_heap::exchange_free(alloc as *u8) + free(alloc as *mut c_void) } } diff --git a/src/libstd/rt/util.rs b/src/libstd/rt/util.rs index 84284ca1faf..e8b1acb1024 100644 --- a/src/libstd/rt/util.rs +++ b/src/libstd/rt/util.rs @@ -26,6 +26,23 @@ use slice::ImmutableVector; // FIXME: Once the runtime matures remove the `true` below to turn off rtassert, etc. pub static ENFORCE_SANITY: bool = true || !cfg!(rtopt) || cfg!(rtdebug) || cfg!(rtassert); +#[deprecated] +#[doc(hidden)] +#[inline] +pub fn get_box_size(body_size: uint, body_align: uint) -> uint { + let header_size = ::mem::size_of::<::raw::Box<()>>(); + let total_size = align_to(header_size, body_align) + body_size; + total_size +} + +// Rounds |size| to the nearest |alignment|. Invariant: |alignment| is a power +// of two. 
+#[inline] +fn align_to(size: uint, align: uint) -> uint { + assert!(align != 0); + (size + align - 1) & !(align - 1) +} + /// Get the number of cores available pub fn num_cpus() -> uint { unsafe { diff --git a/src/libstd/slice.rs b/src/libstd/slice.rs index 21084407b8d..b97c55ad701 100644 --- a/src/libstd/slice.rs +++ b/src/libstd/slice.rs @@ -110,7 +110,7 @@ use ops::Drop; use option::{None, Option, Some}; use ptr::RawPtr; use ptr; -use rt::global_heap::{exchange_free}; +use rt::heap::{exchange_malloc, exchange_free}; use unstable::finally::try_finally; use vec::Vec; @@ -292,9 +292,9 @@ pub trait CloneableVector { impl<'a, T: Clone> CloneableVector for &'a [T] { /// Returns a copy of `v`. #[inline] + #[cfg(stage0)] fn to_owned(&self) -> ~[T] { use RawVec = core::raw::Vec; - use rt::global_heap::{malloc_raw, exchange_free}; use num::{CheckedAdd, CheckedMul}; use option::Expect; @@ -305,7 +305,8 @@ impl<'a, T: Clone> CloneableVector for &'a [T] { let size = size.expect("overflow in to_owned()"); unsafe { - let ret = malloc_raw(size) as *mut RawVec<()>; + // this should pass the real required alignment + let ret = exchange_malloc(size) as *mut RawVec<()>; (*ret).fill = len * mem::nonzero_size_of::(); (*ret).alloc = len * mem::nonzero_size_of::(); @@ -329,7 +330,55 @@ impl<'a, T: Clone> CloneableVector for &'a [T] { for j in range(0, *i as int) { ptr::read(&*p.offset(j)); } - exchange_free(ret as *u8); + // FIXME: #13994 (should pass align and size here) + exchange_free(ret as *mut u8, 0, 8); + }); + cast::transmute(ret) + } + } + + /// Returns a copy of `v`. + #[inline] + #[cfg(not(stage0))] + fn to_owned(&self) -> ~[T] { + use RawVec = core::raw::Vec; + use num::{CheckedAdd, CheckedMul}; + use option::Expect; + + let len = self.len(); + let data_size = len.checked_mul(&mem::size_of::()); + let data_size = data_size.expect("overflow in to_owned()"); + let size = mem::size_of::>().checked_add(&data_size); + let size = size.expect("overflow in to_owned()"); + + unsafe { + // this should pass the real required alignment + let ret = exchange_malloc(size, 8) as *mut RawVec<()>; + + (*ret).fill = len * mem::nonzero_size_of::(); + (*ret).alloc = len * mem::nonzero_size_of::(); + + // Be careful with the following loop. We want it to be optimized + // to a memcpy (or something similarly fast) when T is Copy. LLVM + // is easily confused, so any extra operations during the loop can + // prevent this optimization. 
+ let mut i = 0; + let p = &mut (*ret).data as *mut _ as *mut T; + try_finally( + &mut i, (), + |i, ()| while *i < len { + mem::move_val_init( + &mut(*p.offset(*i as int)), + self.unsafe_ref(*i).clone()); + *i += 1; + }, + |i| if *i < len { + // we must be failing, clean up after ourselves + for j in range(0, *i as int) { + ptr::read(&*p.offset(j)); + } + // FIXME: #13994 (should pass align and size here) + exchange_free(ret as *mut u8, 0, 8); }); cast::transmute(ret) } @@ -768,7 +817,8 @@ impl Drop for MoveItems { // destroy the remaining elements for _x in *self {} unsafe { - exchange_free(self.allocation as *u8) + // FIXME: #13994 (should pass align and size here) + exchange_free(self.allocation, 0, 8) } } } diff --git a/src/libstd/vec.rs b/src/libstd/vec.rs index 28035c32f8e..31b4112f1e7 100644 --- a/src/libstd/vec.rs +++ b/src/libstd/vec.rs @@ -1512,7 +1512,7 @@ impl FromVec for ~[T] { let vp = v.as_mut_ptr(); unsafe { - let ret = malloc_raw(size) as *mut RawVec<()>; + let ret = allocate(size, 8) as *mut RawVec<()>; (*ret).fill = len * mem::nonzero_size_of::(); (*ret).alloc = len * mem::nonzero_size_of::(); diff --git a/src/libsync/arc.rs b/src/libsync/arc.rs index f5369ec862f..226eb7afb5f 100644 --- a/src/libsync/arc.rs +++ b/src/libsync/arc.rs @@ -15,8 +15,9 @@ use std::cast; use std::ptr; -use std::rt::global_heap; +use std::rt::heap::exchange_free; use std::sync::atomics; +use std::mem::{min_align_of, size_of}; /// An atomically reference counted wrapper for shared state. /// @@ -190,7 +191,8 @@ impl Drop for Arc { if self.inner().weak.fetch_sub(1, atomics::Release) == 1 { atomics::fence(atomics::Acquire); - unsafe { global_heap::exchange_free(self.x as *u8) } + unsafe { exchange_free(self.x as *mut u8, size_of::>(), + min_align_of::>()) } } } } @@ -240,7 +242,8 @@ impl Drop for Weak { // the memory orderings if self.inner().weak.fetch_sub(1, atomics::Release) == 1 { atomics::fence(atomics::Acquire); - unsafe { global_heap::exchange_free(self.x as *u8) } + unsafe { exchange_free(self.x as *mut u8, size_of::>(), + min_align_of::>()) } } } } From f8e92cbbe3337974caca28b801efa26734b3c6f9 Mon Sep 17 00:00:00 2001 From: Daniel Micay Date: Fri, 9 May 2014 23:56:15 -0400 Subject: [PATCH 05/11] fix Vec --- src/libstd/vec.rs | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/src/libstd/vec.rs b/src/libstd/vec.rs index 31b4112f1e7..11c737672fb 100644 --- a/src/libstd/vec.rs +++ b/src/libstd/vec.rs @@ -91,6 +91,7 @@ impl Vec { /// let vec: Vec = Vec::with_capacity(10); /// ``` pub fn with_capacity(capacity: uint) -> Vec { + if size_of::() == 0 { return Vec { len: 0, cap: ::uint::MAX, ptr: 0 as *mut T } } if capacity == 0 { Vec::new() } else { @@ -486,6 +487,7 @@ impl Vec { /// assert_eq!(vec.capacity(), 11); /// ``` pub fn reserve_exact(&mut self, capacity: uint) { + if size_of::() == 0 { return } if capacity > self.cap { let size = capacity.checked_mul(&size_of::()).expect("capacity overflow"); unsafe { @@ -505,6 +507,7 @@ impl Vec { /// vec.shrink_to_fit(); /// ``` pub fn shrink_to_fit(&mut self) { + if size_of::() == 0 { return } if self.len == 0 { if self.cap != 0 { unsafe { @@ -559,6 +562,12 @@ impl Vec { /// ``` #[inline] pub fn push(&mut self, value: T) { + if size_of::() == 0 { + // zero-size types consume no memory, so we can't rely on the address space running out + self.len = self.len.checked_add(&1).expect("length overflow"); + unsafe { forget(value); } + return + } if self.len == self.cap { let old_size = self.cap * size_of::(); let size = 
max(old_size, 2 * size_of::()) * 2; @@ -1405,7 +1414,9 @@ impl Drop for Vec { for x in self.as_mut_slice().iter() { ptr::read(x); } - deallocate(self.ptr as *mut u8, self.cap * size_of::(), min_align_of::()) + if size_of::() != 0 { + deallocate(self.ptr as *mut u8, self.cap * size_of::(), min_align_of::()) + } } } } @@ -1460,7 +1471,9 @@ impl Drop for MoveItems { if self.cap != 0 { for _x in *self {} unsafe { - deallocate(self.allocation, self.cap * size_of::(), min_align_of::()) + if size_of::() != 0 { + deallocate(self.allocation, self.cap * size_of::(), min_align_of::()) + } } } } From 0621ccac626ef4ca15e2cdf0aceed13ad0d3f848 Mon Sep 17 00:00:00 2001 From: Daniel Micay Date: Fri, 9 May 2014 23:56:28 -0400 Subject: [PATCH 06/11] vec: move some code inside alloc_or_realloc --- src/libstd/vec.rs | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/src/libstd/vec.rs b/src/libstd/vec.rs index 11c737672fb..351d4f3eaff 100644 --- a/src/libstd/vec.rs +++ b/src/libstd/vec.rs @@ -403,11 +403,11 @@ impl Container for Vec { // FIXME: #13996: need a way to mark the return value as `noalias` #[inline(never)] -unsafe fn alloc_or_realloc(ptr: *mut u8, size: uint, align: uint, old_size: uint) -> *mut u8 { +unsafe fn alloc_or_realloc(ptr: *mut T, size: uint, old_size: uint) -> *mut T { if old_size == 0 { - allocate(size, align) + allocate(size, min_align_of::()) as *mut T } else { - reallocate(ptr, size, align, old_size) + reallocate(ptr as *mut u8, size, min_align_of::(), old_size) as *mut T } } @@ -491,8 +491,7 @@ impl Vec { if capacity > self.cap { let size = capacity.checked_mul(&size_of::()).expect("capacity overflow"); unsafe { - self.ptr = alloc_or_realloc(self.ptr as *mut u8, size, min_align_of::(), - self.cap * size_of::()) as *mut T; + self.ptr = alloc_or_realloc(self.ptr, size, self.cap * size_of::()); } self.cap = capacity; } @@ -573,8 +572,7 @@ impl Vec { let size = max(old_size, 2 * size_of::()) * 2; if old_size > size { fail!("capacity overflow") } unsafe { - self.ptr = alloc_or_realloc(self.ptr as *mut u8, size, min_align_of::(), - self.cap * size_of::()) as *mut u8 as *mut T; + self.ptr = alloc_or_realloc(self.ptr, size, self.cap * size_of::()); } self.cap = max(self.cap, 2) * 2; } From 7e84b221de541e9e1f3c97646c22ae57bcbc51bc Mon Sep 17 00:00:00 2001 From: Daniel Micay Date: Sat, 10 May 2014 00:35:56 -0400 Subject: [PATCH 07/11] vec: factor out some deallocation code --- src/libstd/vec.rs | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/src/libstd/vec.rs b/src/libstd/vec.rs index 351d4f3eaff..aa10be1d1be 100644 --- a/src/libstd/vec.rs +++ b/src/libstd/vec.rs @@ -411,6 +411,13 @@ unsafe fn alloc_or_realloc(ptr: *mut T, size: uint, old_size: uint) -> *mut T } } +#[inline] +unsafe fn dealloc(ptr: *mut T, len: uint) { + if size_of::() != 0 { + deallocate(ptr as *mut u8, len * size_of::(), min_align_of::()) + } +} + impl Vec { /// Returns the number of elements the vector can hold without /// reallocating. 
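The zero-size handling added in patch 05 and the `dealloc` helper above combine so that a `Vec` of zero-sized elements never calls into jemalloc at all. A rough illustration, written as a hypothetical test against the patched libstd; the comments restate the invariants from the diffs, they are not code from the series:

    #[test]
    fn zero_sized_vec_never_allocates() {
        // with_capacity returns ptr = 0, cap = uint::MAX when size_of::<T>() == 0
        let mut v: Vec<()> = Vec::with_capacity(10);
        // push only bumps the (checked) length and forgets the value
        for _ in range(0u, 100u) { v.push(()); }
        assert_eq!(v.len(), 100u);
        // dropping v reaches dealloc(), which is a no-op for zero-sized T
    }
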
@@ -510,7 +517,7 @@ impl Vec { if self.len == 0 { if self.cap != 0 { unsafe { - deallocate(self.ptr as *mut u8, self.cap * size_of::(), min_align_of::()) + dealloc(self.ptr, self.cap) } self.cap = 0; } @@ -658,7 +665,7 @@ impl Vec { pub fn move_iter(self) -> MoveItems { unsafe { let iter = transmute(self.as_slice().iter()); - let ptr = self.ptr as *mut u8; + let ptr = self.ptr; let cap = self.cap; forget(self); MoveItems { allocation: ptr, cap: cap, iter: iter } @@ -1412,9 +1419,7 @@ impl Drop for Vec { for x in self.as_mut_slice().iter() { ptr::read(x); } - if size_of::() != 0 { - deallocate(self.ptr as *mut u8, self.cap * size_of::(), min_align_of::()) - } + dealloc(self.ptr, self.cap) } } } @@ -1434,7 +1439,7 @@ impl fmt::Show for Vec { /// An iterator that moves out of a vector. pub struct MoveItems { - allocation: *mut u8, // the block of memory allocated for the vector + allocation: *mut T, // the block of memory allocated for the vector cap: uint, // the capacity of the vector iter: Items<'static, T> } @@ -1469,9 +1474,7 @@ impl Drop for MoveItems { if self.cap != 0 { for _x in *self {} unsafe { - if size_of::() != 0 { - deallocate(self.allocation, self.cap * size_of::(), min_align_of::()) - } + dealloc(self.allocation, self.cap); } } } From 87b658cf728b5d7e6b81012460454af634f6e6f8 Mon Sep 17 00:00:00 2001 From: Daniel Micay Date: Wed, 7 May 2014 18:27:09 -0400 Subject: [PATCH 08/11] gitattributes: rm obsolete jemalloc entry --- .gitattributes | 1 - 1 file changed, 1 deletion(-) diff --git a/.gitattributes b/.gitattributes index 052341b5845..d65c979a074 100644 --- a/.gitattributes +++ b/.gitattributes @@ -8,4 +8,3 @@ src/etc/pkg/rust-logo.ico binary src/etc/pkg/rust-logo.png binary src/rt/msvc/* -whitespace src/rt/vg/* -whitespace -src/rt/jemalloc/**/* -whitespace From 121ad1cb7db6517ed2aabc9c1514a99f5eb95149 Mon Sep 17 00:00:00 2001 From: Daniel Micay Date: Fri, 9 May 2014 22:59:46 -0400 Subject: [PATCH 09/11] rename `global_heap` -> `libc_heap` This module only contains wrappers for malloc and realloc with out-of-memory checks. --- src/libcollections/hashmap.rs | 4 ++-- src/libnative/io/file_win32.rs | 2 +- src/librustuv/uvll.rs | 2 +- src/libstd/c_str.rs | 2 +- src/libstd/c_vec.rs | 2 +- src/libstd/rt/{global_heap.rs => libc_heap.rs} | 0 src/libstd/rt/local_heap.rs | 7 +++---- src/libstd/rt/mod.rs | 6 +++--- src/libstd/unstable/mutex.rs | 2 +- 9 files changed, 13 insertions(+), 14 deletions(-) rename src/libstd/rt/{global_heap.rs => libc_heap.rs} (100%) diff --git a/src/libcollections/hashmap.rs b/src/libcollections/hashmap.rs index 2d1de87fe06..1a222a27e47 100644 --- a/src/libcollections/hashmap.rs +++ b/src/libcollections/hashmap.rs @@ -42,7 +42,7 @@ mod table { use std::prelude::Drop; use std::ptr; use std::ptr::RawPtr; - use std::rt::global_heap; + use std::rt::libc_heap; use std::intrinsics::{size_of, min_align_of, transmute}; use std::intrinsics::{move_val_init, set_memory}; use std::iter::{Iterator, range_step_inclusive}; @@ -243,7 +243,7 @@ mod table { keys_size, min_align_of::< K >(), vals_size, min_align_of::< V >()); - let buffer = global_heap::malloc_raw(size) as *mut u8; + let buffer = libc_heap::malloc_raw(size) as *mut u8; // FIXME #13094: If malloc was not at as aligned as we expected, // our offset calculations are just plain wrong. 
We could support diff --git a/src/libnative/io/file_win32.rs b/src/libnative/io/file_win32.rs index 42e5ad062ee..5fc9e506cf2 100644 --- a/src/libnative/io/file_win32.rs +++ b/src/libnative/io/file_win32.rs @@ -339,7 +339,7 @@ pub fn mkdir(p: &CString, _mode: io::FilePermission) -> IoResult<()> { } pub fn readdir(p: &CString) -> IoResult> { - use std::rt::global_heap::malloc_raw; + use std::rt::libc_heap::malloc_raw; fn prune(root: &CString, dirs: Vec) -> Vec { let root = unsafe { CString::new(root.with_ref(|p| p), false) }; diff --git a/src/librustuv/uvll.rs b/src/librustuv/uvll.rs index 57f4bd9d7eb..6236fd0e0e5 100644 --- a/src/librustuv/uvll.rs +++ b/src/librustuv/uvll.rs @@ -32,7 +32,7 @@ use libc::{size_t, c_int, c_uint, c_void, c_char, c_double}; use libc::{ssize_t, sockaddr, free, addrinfo}; use libc; -use std::rt::global_heap::malloc_raw; +use std::rt::libc_heap::malloc_raw; #[cfg(test)] use libc::uintptr_t; diff --git a/src/libstd/c_str.rs b/src/libstd/c_str.rs index 7de74dbe507..b33d211aa19 100644 --- a/src/libstd/c_str.rs +++ b/src/libstd/c_str.rs @@ -81,7 +81,7 @@ use str::StrSlice; use str; use slice::{ImmutableVector, MutableVector}; use slice; -use rt::global_heap::malloc_raw; +use rt::libc_heap::malloc_raw; use raw::Slice; /// The representation of a C String. diff --git a/src/libstd/c_vec.rs b/src/libstd/c_vec.rs index 4ef5af9275c..8c2c4fd1f0b 100644 --- a/src/libstd/c_vec.rs +++ b/src/libstd/c_vec.rs @@ -160,7 +160,7 @@ mod tests { use super::CVec; use libc; use ptr; - use rt::global_heap::malloc_raw; + use rt::libc_heap::malloc_raw; fn malloc(n: uint) -> CVec { unsafe { diff --git a/src/libstd/rt/global_heap.rs b/src/libstd/rt/libc_heap.rs similarity index 100% rename from src/libstd/rt/global_heap.rs rename to src/libstd/rt/libc_heap.rs diff --git a/src/libstd/rt/local_heap.rs b/src/libstd/rt/local_heap.rs index 8795736b3f5..efc8072594b 100644 --- a/src/libstd/rt/local_heap.rs +++ b/src/libstd/rt/local_heap.rs @@ -18,7 +18,7 @@ use ops::Drop; use option::{Option, None, Some}; use ptr; use ptr::RawPtr; -use rt::global_heap; +use rt::libc_heap; use rt::local::Local; use rt::task::Task; use raw; @@ -188,7 +188,7 @@ impl MemoryRegion { fn malloc(&mut self, size: uint) -> *mut Box { let total_size = size + AllocHeader::size(); let alloc: *AllocHeader = unsafe { - global_heap::malloc_raw(total_size) as *AllocHeader + libc_heap::malloc_raw(total_size) as *AllocHeader }; let alloc: &mut AllocHeader = unsafe { cast::transmute(alloc) }; @@ -207,8 +207,7 @@ impl MemoryRegion { let total_size = size + AllocHeader::size(); let alloc: *AllocHeader = unsafe { - global_heap::realloc_raw(orig_alloc as *mut u8, - total_size) as *AllocHeader + libc_heap::realloc_raw(orig_alloc as *mut u8, total_size) as *AllocHeader }; let alloc: &mut AllocHeader = unsafe { cast::transmute(alloc) }; diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs index 904921cfa18..a04cbabedd6 100644 --- a/src/libstd/rt/mod.rs +++ b/src/libstd/rt/mod.rs @@ -26,7 +26,7 @@ language and an implementation must be provided regardless of the execution environment. Of foremost importance is the global exchange heap, in the module -`global_heap`. Very little practical Rust code can be written without +`heap`. Very little practical Rust code can be written without access to the global heap. Unlike most of `rt` the global heap is truly a global resource and generally operates independently of the rest of the runtime. @@ -86,8 +86,8 @@ pub mod shouldnt_be_public { // Internal macros used by the runtime. 
mod macros; -// The global (exchange) heap. -pub mod global_heap; +/// Wrappers around malloc / realloc aborting on out-of-memory. +pub mod libc_heap; /// The low-level memory allocation API. pub mod heap; diff --git a/src/libstd/unstable/mutex.rs b/src/libstd/unstable/mutex.rs index 8faedcbd9ed..c9d70915694 100644 --- a/src/libstd/unstable/mutex.rs +++ b/src/libstd/unstable/mutex.rs @@ -434,7 +434,7 @@ mod imp { #[cfg(windows)] mod imp { - use rt::global_heap::malloc_raw; + use rt::libc_heap::malloc_raw; use libc::{HANDLE, BOOL, LPSECURITY_ATTRIBUTES, c_void, DWORD, LPCSTR}; use libc; use ptr; From e2479b8cac0e4f5822e768f8ee8c735e87702712 Mon Sep 17 00:00:00 2001 From: Daniel Micay Date: Sat, 10 May 2014 21:22:12 -0400 Subject: [PATCH 10/11] pass correct CFLAGS for jemalloc --- mk/platform.mk | 14 ++++++++++++++ mk/rt.mk | 2 +- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/mk/platform.mk b/mk/platform.mk index cf9f9c95442..ecff34159da 100644 --- a/mk/platform.mk +++ b/mk/platform.mk @@ -145,6 +145,7 @@ CFG_LIB_NAME_x86_64-unknown-linux-gnu=lib$(1).so CFG_STATIC_LIB_NAME_x86_64-unknown-linux-gnu=lib$(1).a CFG_LIB_GLOB_x86_64-unknown-linux-gnu=lib$(1)-*.so CFG_LIB_DSYM_GLOB_x86_64-unknown-linux-gnu=lib$(1)-*.dylib.dSYM +CFG_CFLAGS_x86_64-unknown-linux-gnu := -m64 CFG_GCCISH_CFLAGS_x86_64-unknown-linux-gnu := -Wall -Werror -g -fPIC -m64 CFG_GCCISH_CXXFLAGS_x86_64-unknown-linux-gnu := -fno-rtti CFG_GCCISH_LINK_FLAGS_x86_64-unknown-linux-gnu := -shared -fPIC -ldl -pthread -lrt -g -m64 @@ -172,6 +173,7 @@ CFG_LIB_NAME_i686-unknown-linux-gnu=lib$(1).so CFG_STATIC_LIB_NAME_i686-unknown-linux-gnu=lib$(1).a CFG_LIB_GLOB_i686-unknown-linux-gnu=lib$(1)-*.so CFG_LIB_DSYM_GLOB_i686-unknown-linux-gnu=lib$(1)-*.dylib.dSYM +CFG_CFLAGS_i686-unknown-linux-gnu := -m32 CFG_GCCISH_CFLAGS_i686-unknown-linux-gnu := -Wall -Werror -g -fPIC -m32 CFG_GCCISH_CXXFLAGS_i686-unknown-linux-gnu := -fno-rtti CFG_GCCISH_LINK_FLAGS_i686-unknown-linux-gnu := -shared -fPIC -ldl -pthread -lrt -g -m32 @@ -201,6 +203,7 @@ AR_arm-apple-darwin = $(shell xcrun -find -sdk iphoneos ar) CFG_LIB_NAME_arm-apple-darwin = lib$(1).dylib CFG_LIB_GLOB_arm-apple-darwin = lib$(1)-*.dylib CFG_LIB_DSYM_GLOB_arm-apple-darwin = lib$(1)-*.dylib.dSYM +CFG_CFLAGS_arm-apple-darwin := $(CFG_IOS_FLAGS) CFG_GCCISH_CFLAGS_arm-apple-darwin := -Wall -Werror -g -fPIC $(CFG_IOS_FLAGS) CFG_GCCISH_CXXFLAGS_arm-apple-darwin := -fno-rtti $(CFG_IOS_FLAGS) CFG_GCCISH_LINK_FLAGS_arm-apple-darwin := -dynamiclib -lpthread -framework CoreServices -Wl,-no_compact_unwind @@ -229,6 +232,7 @@ CFG_LIB_NAME_x86_64-apple-darwin=lib$(1).dylib CFG_STATIC_LIB_NAME_x86_64-apple-darwin=lib$(1).a CFG_LIB_GLOB_x86_64-apple-darwin=lib$(1)-*.dylib CFG_LIB_DSYM_GLOB_x86_64-apple-darwin=lib$(1)-*.dylib.dSYM +CFG_CFLAGS_x86_64-apple-darwin := -m64 -arch x86_64 CFG_GCCISH_CFLAGS_x86_64-apple-darwin := -Wall -Werror -g -fPIC -m64 -arch x86_64 CFG_GCCISH_CXXFLAGS_x86_64-apple-darwin := -fno-rtti CFG_GCCISH_LINK_FLAGS_x86_64-apple-darwin := -dynamiclib -pthread -framework CoreServices -m64 @@ -256,6 +260,7 @@ CFG_LIB_NAME_i686-apple-darwin=lib$(1).dylib CFG_STATIC_LIB_NAME_i686-apple-darwin=lib$(1).a CFG_LIB_GLOB_i686-apple-darwin=lib$(1)-*.dylib CFG_LIB_DSYM_GLOB_i686-apple-darwin=lib$(1)-*.dylib.dSYM +CFG_CFLAGS_i686-apple-darwin := -m32 -arch i386 CFG_GCCISH_CFLAGS_i686-apple-darwin := -Wall -Werror -g -fPIC -m32 -arch i386 CFG_GCCISH_CXXFLAGS_i686-apple-darwin := -fno-rtti CFG_GCCISH_LINK_FLAGS_i686-apple-darwin := -dynamiclib -pthread -framework CoreServices -m32 @@ 
-283,6 +288,7 @@ CFG_LIB_NAME_arm-linux-androideabi=lib$(1).so CFG_STATIC_LIB_NAME_arm-linux-androideabi=lib$(1).a CFG_LIB_GLOB_arm-linux-androideabi=lib$(1)-*.so CFG_LIB_DSYM_GLOB_arm-linux-androideabi=lib$(1)-*.dylib.dSYM +CFG_CFLAGS_arm-linux-androideabi := -D__arm__ -DANDROID -D__ANDROID__ CFG_GCCISH_CFLAGS_arm-linux-androideabi := -Wall -g -fPIC -D__arm__ -DANDROID -D__ANDROID__ CFG_GCCISH_CXXFLAGS_arm-linux-androideabi := -fno-rtti CFG_GCCISH_LINK_FLAGS_arm-linux-androideabi := -shared -fPIC -ldl -g -lm -lsupc++ @@ -313,6 +319,7 @@ CFG_LIB_NAME_arm-unknown-linux-gnueabihf=lib$(1).so CFG_STATIC_LIB_NAME_arm-unknown-linux-gnueabihf=lib$(1).a CFG_LIB_GLOB_arm-unknown-linux-gnueabihf=lib$(1)-*.so CFG_LIB_DSYM_GLOB_arm-unknown-linux-gnueabihf=lib$(1)-*.dylib.dSYM +CFG_CFLAGS_arm-unknown-linux-gnueabihf := -D__arm__ CFG_GCCISH_CFLAGS_arm-unknown-linux-gnueabihf := -Wall -g -fPIC -D__arm__ CFG_GCCISH_CXXFLAGS_arm-unknown-linux-gnueabihf := -fno-rtti CFG_GCCISH_LINK_FLAGS_arm-unknown-linux-gnueabihf := -shared -fPIC -g @@ -343,6 +350,7 @@ CFG_LIB_NAME_arm-unknown-linux-gnueabi=lib$(1).so CFG_STATIC_LIB_NAME_arm-unknown-linux-gnueabi=lib$(1).a CFG_LIB_GLOB_arm-unknown-linux-gnueabi=lib$(1)-*.so CFG_LIB_DSYM_GLOB_arm-unknown-linux-gnueabi=lib$(1)-*.dylib.dSYM +CFG_CFLAGS_arm-unknown-linux-gnueabi := -D__arm__ -mfpu=vfp CFG_GCCISH_CFLAGS_arm-unknown-linux-gnueabi := -Wall -g -fPIC -D__arm__ -mfpu=vfp CFG_GCCISH_CXXFLAGS_arm-unknown-linux-gnueabi := -fno-rtti CFG_GCCISH_LINK_FLAGS_arm-unknown-linux-gnueabi := -shared -fPIC -g @@ -372,6 +380,7 @@ CFG_LIB_NAME_mips-unknown-linux-gnu=lib$(1).so CFG_STATIC_LIB_NAME_mips-unknown-linux-gnu=lib$(1).a CFG_LIB_GLOB_mips-unknown-linux-gnu=lib$(1)-*.so CFG_LIB_DSYM_GLOB_mips-unknown-linux-gnu=lib$(1)-*.dylib.dSYM +CFG_CFLAGS_mips-unknown-linux-gnu := -mips32r2 -msoft-float -mabi=32 -mno-compact-eh CFG_GCCISH_CFLAGS_mips-unknown-linux-gnu := -Wall -g -fPIC -mips32r2 -msoft-float -mabi=32 -mno-compact-eh CFG_GCCISH_CXXFLAGS_mips-unknown-linux-gnu := -fno-rtti CFG_GCCISH_LINK_FLAGS_mips-unknown-linux-gnu := -shared -fPIC -g -mips32r2 -msoft-float -mabi=32 @@ -400,6 +409,7 @@ CFG_LIB_NAME_i686-pc-mingw32=$(1).dll CFG_STATIC_LIB_NAME_i686-pc-mingw32=$(1).lib CFG_LIB_GLOB_i686-pc-mingw32=$(1)-*.dll CFG_LIB_DSYM_GLOB_i686-pc-mingw32=$(1)-*.dylib.dSYM +CFG_CFLAGS_mips-i686-pc-mingw32 := -m32 -march=i686 -D_WIN32_WINNT=0x0600 CFG_GCCISH_CFLAGS_i686-pc-mingw32 := -Wall -Werror -g -m32 -march=i686 -D_WIN32_WINNT=0x0600 -I$(CFG_SRC_DIR)src/etc/mingw-fix-include CFG_GCCISH_CXXFLAGS_i686-pc-mingw32 := -fno-rtti CFG_GCCISH_LINK_FLAGS_i686-pc-mingw32 := -shared -fPIC -g -m32 @@ -428,6 +438,7 @@ CFG_LIB_NAME_i586-mingw32msvc=$(1).dll CFG_STATIC_LIB_NAME_i586-mingw32msvc=$(1).lib CFG_LIB_GLOB_i586-mingw32msvc=$(1)-*.dll CFG_LIB_DSYM_GLOB_i586-mingw32msvc=$(1)-*.dylib.dSYM +CFG_CFLAGS_i586-mingw32msvc := -march=i586 -m32 CFG_GCCISH_CFLAGS_i586-mingw32msvc := -Wall -Werror -g -march=i586 -m32 CFG_GCCISH_CXXFLAGS_i586-mingw32msvc := -fno-rtti CFG_GCCISH_LINK_FLAGS_i586-mingw32msvc := -shared -g -m32 @@ -458,6 +469,7 @@ CFG_LIB_NAME_i686-w64-mingw32=$(1).dll CFG_STATIC_LIB_NAME_i686-w64-mingw32=$(1).lib CFG_LIB_GLOB_i686-w64-mingw32=$(1)-*.dll CFG_LIB_DSYM_GLOB_i686-w64-mingw32=$(1)-*.dylib.dSYM +CFG_CFLAGS_i586-w64-mingw32 := -march=i586 -m32 -D_WIN32_WINNT=0x0600 CFG_GCCISH_CFLAGS_i686-w64-mingw32 := -Wall -Werror -g -m32 -D_WIN32_WINNT=0x0600 CFG_GCCISH_CXXFLAGS_i686-w64-mingw32 := -fno-rtti CFG_GCCISH_LINK_FLAGS_i686-w64-mingw32 := -shared -g -m32 @@ -487,6 +499,7 @@ 
CFG_LIB_NAME_x86_64-w64-mingw32=$(1).dll CFG_STATIC_LIB_NAME_x86_64-w64-mingw32=$(1).lib CFG_LIB_GLOB_x86_64-w64-mingw32=$(1)-*.dll CFG_LIB_DSYM_GLOB_x86_64-w64-mingw32=$(1)-*.dylib.dSYM +CFG_CFLAGS_x86_64-w64-mingw32 := -m64 -D_WIN32_WINNT=0x0600 CFG_GCCISH_CFLAGS_x86_64-w64-mingw32 := -Wall -Werror -g -m64 -D_WIN32_WINNT=0x0600 CFG_GCCISH_CXXFLAGS_x86_64-w64-mingw32 := -fno-rtti CFG_GCCISH_LINK_FLAGS_x86_64-w64-mingw32 := -shared -g -m64 @@ -515,6 +528,7 @@ CFG_LIB_NAME_x86_64-unknown-freebsd=lib$(1).so CFG_STATIC_LIB_NAME_x86_64-unknown-freebsd=lib$(1).a CFG_LIB_GLOB_x86_64-unknown-freebsd=lib$(1)-*.so CFG_LIB_DSYM_GLOB_x86_64-unknown-freebsd=$(1)-*.dylib.dSYM +CFG_CFLAGS_x86_64-unknown-freebsd := -I/usr/local/include CFG_GCCISH_CFLAGS_x86_64-unknown-freebsd := -Wall -Werror -g -fPIC -I/usr/local/include CFG_GCCISH_LINK_FLAGS_x86_64-unknown-freebsd := -shared -fPIC -g -pthread -lrt CFG_GCCISH_DEF_FLAG_x86_64-unknown-freebsd := -Wl,--export-dynamic,--dynamic-list= diff --git a/mk/rt.mk b/mk/rt.mk index e4a548dd7bf..b18b13c25c0 100644 --- a/mk/rt.mk +++ b/mk/rt.mk @@ -260,7 +260,7 @@ $$(JEMALLOC_LIB_$(1)): $$(JEMALLOC_DEPS) $$(MKFILE_DEPS) CC="$$(CC_$(1))" \ AR="$$(AR_$(1))" \ RANLIB="$$(AR_$(1)) s" \ - EXTRA_CFLAGS="$$(CFG_GCCISH_CFLAGS)" + EXTRA_CFLAGS="$$(CFG_CFLAGS_$(1))" $$(Q)$$(MAKE) -C "$$(JEMALLOC_BUILD_DIR_$(1))" build_lib_static $$(Q)cp $$(JEMALLOC_BUILD_DIR_$(1))/lib/$$(JEMALLOC_REAL_NAME_$(1)) $$(JEMALLOC_LIB_$(1)) From 81fadbbc4182c5a34e0d2ff698471abfc1ec0e33 Mon Sep 17 00:00:00 2001 From: Daniel Micay Date: Sun, 11 May 2014 00:54:41 -0400 Subject: [PATCH 11/11] android workaround --- src/libstd/rt/heap.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libstd/rt/heap.rs b/src/libstd/rt/heap.rs index 30583f76330..ffe6dccfa15 100644 --- a/src/libstd/rt/heap.rs +++ b/src/libstd/rt/heap.rs @@ -25,7 +25,7 @@ extern { } // -lpthread needs to occur after -ljemalloc, the earlier argument isn't enough -#[cfg(not(windows))] +#[cfg(not(windows), not(target_os = "android"))] #[link(name = "pthread")] extern {}
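Taken together, the series leaves `std::rt::heap` as the single low-level entry point: every allocation carries a size and an alignment, and every deallocation passes them back (except the call sites still marked FIXME #13994, which pass 0 and 8 as placeholders). A minimal sketch of a container buffer built on this API, mirroring the `alloc_or_realloc`/`dealloc` helpers added to Vec; the `grow` and `free_buf` names are illustrative, not from the patches:

    use std::mem::{size_of, min_align_of};
    use std::rt::heap::{allocate, reallocate, deallocate};

    // Grow a buffer from old_cap to new_cap elements of T.
    // Assumes T is not zero-sized (Vec handles that case separately, patch 05)
    // and that the multiplications do not overflow (Vec uses checked_mul).
    unsafe fn grow<T>(ptr: *mut T, old_cap: uint, new_cap: uint) -> *mut T {
        let size = new_cap * size_of::<T>();
        if old_cap == 0 {
            allocate(size, min_align_of::<T>()) as *mut T
        } else {
            // the old allocation size must be handed back along with the pointer
            reallocate(ptr as *mut u8, size, min_align_of::<T>(),
                       old_cap * size_of::<T>()) as *mut T
        }
    }

    // Free a buffer of cap elements of T obtained from grow().
    unsafe fn free_buf<T>(ptr: *mut T, cap: uint) {
        if cap != 0 && size_of::<T>() != 0 {
            deallocate(ptr as *mut u8, cap * size_of::<T>(), min_align_of::<T>())
        }
    }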