mirror of https://github.com/rust-lang/rust.git
Lots of work on memory tracking and channels.
We're trying to get closer to correct move semantics for channel operations. This involves a lot of cleanup, such as removing the unused sched parameter from the rust_vec constructor, and making circular_buffer kernel_owned. It also adds tagging for memory allocations: we now give a string tag to everything we allocate, so if something leaks while TRACK_ALLOCATIONS is enabled, it is much easier to tell exactly what is leaking.
This commit is contained in:
parent a44fb04d57
commit 3ae4dcd41e
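
For context on the tagging scheme this commit introduces, here is a minimal illustrative sketch, not the runtime's actual code: every allocation made through memory_region now gets a small header carrying a magic word, an index into the region's live-allocation list, and the caller-supplied tag string, and with TRACK_ALLOCATIONS defined anything still in that list can be reported together with its tag. The names below mirror the diff (alloc_header, MAGIC, tag); the helper functions and the std::vector bookkeeping are stand-ins.

    // Illustrative sketch of the tagged-allocation idea (not the runtime's code).
    #include <cassert>
    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>
    #include <vector>

    #define MAGIC 0xbadc0ffe

    struct alloc_header {
        uint32_t magic;    // sanity check that a freed pointer was one of ours
        int index;         // slot in the live-allocation list
        const char *tag;   // caller-supplied description, e.g. "circular_buffer"
        char data[];       // user bytes start here (flexible array member extension)
    };

    static std::vector<alloc_header *> allocation_list;

    void *tagged_malloc(size_t size, const char *tag) {
        alloc_header *h = (alloc_header *)std::malloc(sizeof(alloc_header) + size);
        h->magic = MAGIC;
        h->tag = tag;
        h->index = (int)allocation_list.size();
        allocation_list.push_back(h);
        return h->data;                      // the caller only ever sees the payload
    }

    void tagged_free(void *mem) {
        alloc_header *h = (alloc_header *)((char *)mem - sizeof(alloc_header));
        assert(h->magic == MAGIC);
        allocation_list[h->index] = NULL;    // live entries left behind are leaks
        std::free(h);
    }

    void report_leaks() {                    // what TRACK_ALLOCATIONS makes possible
        for (size_t i = 0; i < allocation_list.size(); i++)
            if (allocation_list[i])
                std::printf("leaked allocation tagged \"%s\"\n", allocation_list[i]->tag);
    }
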
@@ -38,6 +38,7 @@ type upcalls =
ValueRef flush_chan,
ValueRef del_chan,
ValueRef clone_chan,
ValueRef chan_target_task,
ValueRef _yield,
ValueRef sleep,
ValueRef send,
@@ -95,6 +96,9 @@ fn declare_upcalls(type_names tn, TypeRef tydesc_type, TypeRef taskptr_type,
del_chan=dv("del_chan", ~[T_opaque_chan_ptr()]),
clone_chan=d("clone_chan", ~[taskptr_type, T_opaque_chan_ptr()],
T_opaque_chan_ptr()),
chan_target_task=d("chan_target_task",
~[T_opaque_chan_ptr()],
taskptr_type),
_yield=dv("yield", empty_vec),
sleep=dv("sleep", ~[T_size_t()]),
send=dv("send", ~[T_opaque_chan_ptr(), T_ptr(T_i8())]),

@@ -22,6 +22,12 @@ import back::link::mangle_internal_name_by_path_and_seq;
import trans_common::*;
import trans::*;

export trans_port;
export trans_chan;
export trans_spawn;
export trans_send;
export trans_recv;

fn trans_port(&@block_ctxt cx, ast::node_id id) -> result {
auto t = node_id_type(cx.fcx.lcx.ccx, id);
auto unit_ty;
@@ -123,13 +129,7 @@ fn trans_spawn(&@block_ctxt cx, &ast::spawn_dom dom, &option::t[str] name,
auto llargs = alloc_ty(bcx, args_ty);
auto i = 0u;
for (ValueRef v in arg_vals) {
// log_err #fmt("ty(llargs) = %s",
// val_str(bcx.fcx.lcx.ccx.tn, llargs.val));

auto target = bcx.build.GEP(llargs.val, ~[C_int(0), C_int(i as int)]);
// log_err #fmt("ty(v) = %s", val_str(bcx.fcx.lcx.ccx.tn, v));
// log_err #fmt("ty(target) = %s",
// val_str(bcx.fcx.lcx.ccx.tn, target));

bcx.build.Store(v, target);
i += 1u;
@@ -199,55 +199,6 @@ fn mk_spawn_wrapper(&@block_ctxt cx, &@ast::expr func, &ty::t args_ty) ->
ret rslt(cx, llfndecl);
}

// Does a deep copy of a value. This is needed for passing arguments to child
// tasks, and for sending things through channels. There are probably some
// uniqueness optimizations and things we can do here for tasks in the same
// domain.
fn deep_copy(&@block_ctxt bcx, ValueRef v, ty::t t, ValueRef target_task)
-> result
{
// TODO: make sure all paths add any reference counting that they need to.

// TODO: Teach deep copy to understand everything else it needs to.

auto tcx = bcx.fcx.lcx.ccx.tcx;
if(ty::type_is_scalar(tcx, t)) {
ret rslt(bcx, v);
}
else if(ty::type_is_str(tcx, t)) {
ret rslt(bcx,
bcx.build.Call(bcx.fcx.lcx.ccx.upcalls.dup_str,
~[bcx.fcx.lltaskptr, target_task, v]));
}
else if(ty::type_is_chan(tcx, t)) {
// If this is a channel, we need to clone it.
auto chan_ptr = bcx.build.PointerCast(v, T_opaque_chan_ptr());

auto chan_raw_val =
bcx.build.Call(bcx.fcx.lcx.ccx.upcalls.clone_chan,
~[bcx.fcx.lltaskptr, target_task, chan_ptr]);

// Cast back to the type the context was expecting.
auto chan_val = bcx.build.PointerCast(chan_raw_val,
val_ty(v));

ret rslt(bcx, chan_val);
}
else if(ty::type_is_structural(tcx, t)) {
fn inner_deep_copy(&@block_ctxt bcx, ValueRef v, ty::t t) -> result {
log_err "Unimplemented type for deep_copy.";
fail;
}

ret iter_structural_ty(bcx, v, t, inner_deep_copy);
}
else {
bcx.fcx.lcx.ccx.sess.bug("unexpected type in " +
"trans::deep_copy: " +
ty_to_str(tcx, t));
}
}

fn trans_send(&@block_ctxt cx, &@ast::expr lhs, &@ast::expr rhs,
ast::node_id id) -> result {
auto bcx = cx;
@@ -302,3 +253,52 @@ fn recv_val(&@block_ctxt cx, ValueRef to, &@ast::expr from, &ty::t unit_ty,
ret rslt(bcx, to);
}

// Does a deep copy of a value. This is needed for passing arguments to child
// tasks, and for sending things through channels. There are probably some
// uniqueness optimizations and things we can do here for tasks in the same
// domain.
fn deep_copy(&@block_ctxt bcx, ValueRef v, ty::t t, ValueRef target_task)
-> result
{
// TODO: make sure all paths add any reference counting that they need to.

// TODO: Teach deep copy to understand everything else it needs to.

auto tcx = bcx.fcx.lcx.ccx.tcx;
if(ty::type_is_scalar(tcx, t)) {
ret rslt(bcx, v);
}
else if(ty::type_is_str(tcx, t)) {
ret rslt(bcx,
bcx.build.Call(bcx.fcx.lcx.ccx.upcalls.dup_str,
~[bcx.fcx.lltaskptr, target_task, v]));
}
else if(ty::type_is_chan(tcx, t)) {
// If this is a channel, we need to clone it.
auto chan_ptr = bcx.build.PointerCast(v, T_opaque_chan_ptr());

auto chan_raw_val =
bcx.build.Call(bcx.fcx.lcx.ccx.upcalls.clone_chan,
~[bcx.fcx.lltaskptr, target_task, chan_ptr]);

// Cast back to the type the context was expecting.
auto chan_val = bcx.build.PointerCast(chan_raw_val,
val_ty(v));

ret rslt(bcx, chan_val);
}
else if(ty::type_is_structural(tcx, t)) {
fn inner_deep_copy(&@block_ctxt bcx, ValueRef v, ty::t t) -> result {
log_err "Unimplemented type for deep_copy.";
fail;
}

ret iter_structural_ty(bcx, v, t, inner_deep_copy);
}
else {
bcx.fcx.lcx.ccx.sess.bug("unexpected type in " +
"trans::deep_copy: " +
ty_to_str(tcx, t));
}
}

@@ -4,14 +4,14 @@

#include "rust_internal.h"

circular_buffer::circular_buffer(rust_task *task, size_t unit_sz) :
sched(task->sched),
task(task),
circular_buffer::circular_buffer(rust_kernel *kernel, size_t unit_sz) :
sched(kernel->sched),
kernel(kernel),
unit_sz(unit_sz),
_buffer_sz(initial_size()),
_next(0),
_unread(0),
_buffer((uint8_t *)task->malloc(_buffer_sz)) {
_buffer((uint8_t *)kernel->malloc(_buffer_sz, "circular_buffer")) {

A(sched, unit_sz, "Unit size must be larger than zero.");

@@ -20,7 +20,6 @@ circular_buffer::circular_buffer(rust_task *task, size_t unit_sz) :
_buffer_sz, _unread, this);

A(sched, _buffer, "Failed to allocate buffer.");
task->ref();
}

circular_buffer::~circular_buffer() {
@@ -28,8 +27,7 @@ circular_buffer::~circular_buffer() {
I(sched, _buffer);
W(sched, _unread == 0,
"freeing circular_buffer with %d unread bytes", _unread);
task->free(_buffer);
--task->ref_count;
kernel->free(_buffer);
}

size_t
@@ -144,9 +142,10 @@ circular_buffer::grow() {
size_t new_buffer_sz = _buffer_sz * 2;
I(sched, new_buffer_sz <= MAX_CIRCULAR_BUFFER_SIZE);
DLOG(sched, mem, "circular_buffer is growing to %d bytes", new_buffer_sz);
void *new_buffer = task->malloc(new_buffer_sz);
void *new_buffer = kernel->malloc(new_buffer_sz,
"new circular_buffer (grow)");
transfer(new_buffer);
task->free(_buffer);
kernel->free(_buffer);
_buffer = (uint8_t *)new_buffer;
_next = 0;
_buffer_sz = new_buffer_sz;
@@ -158,9 +157,10 @@ circular_buffer::shrink() {
I(sched, initial_size() <= new_buffer_sz);
DLOG(sched, mem, "circular_buffer is shrinking to %d bytes",
new_buffer_sz);
void *new_buffer = task->malloc(new_buffer_sz);
void *new_buffer = kernel->malloc(new_buffer_sz,
"new circular_buffer (shrink)");
transfer(new_buffer);
task->free(_buffer);
kernel->free(_buffer);
_buffer = (uint8_t *)new_buffer;
_next = 0;
_buffer_sz = new_buffer_sz;

@@ -6,17 +6,17 @@
#define CIRCULAR_BUFFER_H

class
circular_buffer : public task_owned<circular_buffer> {
circular_buffer : public kernel_owned<circular_buffer> {
static const size_t INITIAL_CIRCULAR_BUFFER_SIZE_IN_UNITS = 8;
static const size_t MAX_CIRCULAR_BUFFER_SIZE = 1 << 24;

rust_scheduler *sched;

public:
rust_task *task;
rust_kernel *kernel;
// Size of the data unit in bytes.
const size_t unit_sz;
circular_buffer(rust_task *task, size_t unit_sz);
circular_buffer(rust_kernel *kernel, size_t unit_sz);
~circular_buffer();
void transfer(void *dst);
void enqueue(void *src);

@@ -3,33 +3,38 @@
#define MEMORY_H

// FIXME: It would be really nice to be able to get rid of this.
inline void *operator new[](size_t size, rust_task *task) {
return task->malloc(size);
inline void *operator new[](size_t size, rust_task *task, const char *tag) {
return task->malloc(size, tag);
}

template <typename T>
inline void *task_owned<T>::operator new(size_t size, rust_task *task) {
return task->malloc(size);
inline void *task_owned<T>::operator new(size_t size, rust_task *task,
const char *tag) {
return task->malloc(size, tag);
}

template <typename T>
inline void *task_owned<T>::operator new[](size_t size, rust_task *task) {
return task->malloc(size);
inline void *task_owned<T>::operator new[](size_t size, rust_task *task,
const char *tag) {
return task->malloc(size, tag);
}

template <typename T>
inline void *task_owned<T>::operator new(size_t size, rust_task &task) {
return task.malloc(size);
inline void *task_owned<T>::operator new(size_t size, rust_task &task,
const char *tag) {
return task.malloc(size, tag);
}

template <typename T>
inline void *task_owned<T>::operator new[](size_t size, rust_task &task) {
return task.malloc(size);
inline void *task_owned<T>::operator new[](size_t size, rust_task &task,
const char *tag) {
return task.malloc(size, tag);
}

template <typename T>
inline void *kernel_owned<T>::operator new(size_t size, rust_kernel *kernel) {
return kernel->malloc(size);
inline void *kernel_owned<T>::operator new(size_t size, rust_kernel *kernel,
const char *tag) {
return kernel->malloc(size, tag);
}

@@ -4,7 +4,13 @@
// NB: please do not commit code with this uncommented. It's
// hugely expensive and should only be used as a last resort.
//
// #define TRACK_ALLOCATIONS
#define TRACK_ALLOCATIONS

#define MAGIC 0xbadc0ffe

memory_region::alloc_header *memory_region::get_header(void *mem) {
return (alloc_header *)((char *)mem - sizeof(alloc_header));
}

memory_region::memory_region(rust_srv *srv, bool synchronized) :
_srv(srv), _parent(NULL), _live_allocations(0),
@@ -19,37 +25,37 @@ memory_region::memory_region(memory_region *parent) :
}

void memory_region::add_alloc() {
//_live_allocations++;
sync::increment(_live_allocations);
_live_allocations++;
//sync::increment(_live_allocations);
}

void memory_region::dec_alloc() {
//_live_allocations--;
sync::decrement(_live_allocations);
_live_allocations--;
//sync::decrement(_live_allocations);
}

void memory_region::free(void *mem) {
// printf("free: ptr 0x%" PRIxPTR" region=%p\n", (uintptr_t) mem, this);
if (!mem) { return; }
if (_synchronized) { _lock.lock(); }
alloc_header *alloc = get_header(mem);
assert(alloc->magic == MAGIC);
#ifdef TRACK_ALLOCATIONS
int index = ((int *)mem)[-1];
if (_allocation_list[index] != (uint8_t *)mem - sizeof(int)) {
if (_allocation_list[alloc->index] != alloc) {
printf("free: ptr 0x%" PRIxPTR " is not in allocation_list\n",
(uintptr_t) mem);
_srv->fatal("not in allocation_list", __FILE__, __LINE__, "");
}
else {
// printf("freed index %d\n", index);
_allocation_list[index] = NULL;
_allocation_list[alloc->index] = NULL;
}
mem = (void*)((uint8_t*)mem - sizeof(int));
#endif
if (_live_allocations < 1) {
_srv->fatal("live_allocs < 1", __FILE__, __LINE__, "");
}
dec_alloc();
_srv->free(mem);
_srv->free(alloc);
if (_synchronized) { _lock.unlock(); }
}

@@ -59,77 +65,55 @@ memory_region::realloc(void *mem, size_t size) {
if (!mem) {
add_alloc();
}
size += sizeof(alloc_header);
alloc_header *alloc = get_header(mem);
assert(alloc->magic == MAGIC);
alloc_header *newMem = (alloc_header *)_srv->realloc(alloc, size);
#ifdef TRACK_ALLOCATIONS
size += sizeof(int);
mem = (void*)((uint8_t*)mem - sizeof(int));
int index = *(int *)mem;
#endif
void *newMem = _srv->realloc(mem, size);
#ifdef TRACK_ALLOCATIONS
if (_allocation_list[index] != mem) {
if (_allocation_list[newMem->index] != alloc) {
printf("at index %d, found %p, expected %p\n",
index, _allocation_list[index], mem);
alloc->index, _allocation_list[alloc->index], alloc);
printf("realloc: ptr 0x%" PRIxPTR " is not in allocation_list\n",
(uintptr_t) mem);
_srv->fatal("not in allocation_list", __FILE__, __LINE__, "");
}
else {
_allocation_list[index] = newMem;
(*(int*)newMem) = index;
_allocation_list[newMem->index] = newMem;
// printf("realloc: stored %p at index %d, replacing %p\n",
// newMem, index, mem);
}
#endif
if (_synchronized) { _lock.unlock(); }
#ifdef TRACK_ALLOCATIONS
newMem = (void *)((uint8_t*)newMem + sizeof(int));
#endif
return newMem;
return newMem->data;
}

void *
memory_region::malloc(size_t size) {
memory_region::malloc(size_t size, const char *tag, bool zero) {
if (_synchronized) { _lock.lock(); }
add_alloc();
size_t old_size = size;
size += sizeof(alloc_header);
alloc_header *mem = (alloc_header *)_srv->malloc(size);
mem->magic = MAGIC;
mem->tag = tag;
#ifdef TRACK_ALLOCATIONS
size += sizeof(int);
#endif
void *mem = _srv->malloc(size);
#ifdef TRACK_ALLOCATIONS
int index = _allocation_list.append(mem);
int *p = (int *)mem;
*p = index;
mem->index = _allocation_list.append(mem);
// printf("malloc: stored %p at index %d\n", mem, index);
#endif
// printf("malloc: ptr 0x%" PRIxPTR " region=%p\n",
// (uintptr_t) mem, this);

if(zero) {
memset(mem->data, 0, old_size);
}

if (_synchronized) { _lock.unlock(); }
#ifdef TRACK_ALLOCATIONS
mem = (void*)((uint8_t*)mem + sizeof(int));
#endif
return mem;
return mem->data;
}

void *
memory_region::calloc(size_t size) {
if (_synchronized) { _lock.lock(); }
add_alloc();
#ifdef TRACK_ALLOCATIONS
size += sizeof(int);
#endif
void *mem = _srv->malloc(size);
memset(mem, 0, size);
#ifdef TRACK_ALLOCATIONS
int index = _allocation_list.append(mem);
int *p = (int *)mem;
*p = index;
// printf("calloc: stored %p at index %d\n", mem, index);
#endif
if (_synchronized) { _lock.unlock(); }
#ifdef TRACK_ALLOCATIONS
mem = (void*)((uint8_t*)mem + sizeof(int));
#endif
return mem;
memory_region::calloc(size_t size, const char *tag) {
return malloc(size, tag, true);
}

memory_region::~memory_region() {

@@ -15,10 +15,19 @@ class rust_srv;

class memory_region {
private:
struct alloc_header {
uint32_t magic;
int index;
const char *tag;
char data[];
};

alloc_header *get_header(void *mem);

rust_srv *_srv;
memory_region *_parent;
size_t _live_allocations;
array_list<void *> _allocation_list;
array_list<alloc_header *> _allocation_list;
const bool _detailed_leaks;
const bool _synchronized;
lock_and_signal _lock;
@@ -29,8 +38,8 @@ private:
public:
memory_region(rust_srv *srv, bool synchronized);
memory_region(memory_region *parent);
void *malloc(size_t size);
void *calloc(size_t size);
void *malloc(size_t size, const char *tag, bool zero = true);
void *calloc(size_t size, const char *tag);
void *realloc(void *mem, size_t size);
void free(void *mem);
virtual ~memory_region();
@@ -40,12 +49,14 @@ public:
void hack_allow_leaks();
};

inline void *operator new(size_t size, memory_region &region) {
return region.malloc(size);
inline void *operator new(size_t size, memory_region &region,
const char *tag) {
return region.malloc(size, tag);
}

inline void *operator new(size_t size, memory_region *region) {
return region->malloc(size);
inline void *operator new(size_t size, memory_region *region,
const char *tag) {
return region->malloc(size, tag);
}

//

@@ -24,12 +24,14 @@ command_line_args : public kernel_owned<command_line_args>
LPCWSTR cmdline = GetCommandLineW();
LPWSTR *wargv = CommandLineToArgvW(cmdline, &argc);
kernel->win32_require("CommandLineToArgvW", wargv != NULL);
argv = (char **) kernel->malloc(sizeof(char*) * argc);
argv = (char **) kernel->malloc(sizeof(char*) * argc,
"win32 command line");
for (int i = 0; i < argc; ++i) {
int n_chars = WideCharToMultiByte(CP_UTF8, 0, wargv[i], -1,
NULL, 0, NULL, NULL);
kernel->win32_require("WideCharToMultiByte(0)", n_chars != 0);
argv[i] = (char *) kernel->malloc(n_chars);
argv[i] = (char *) kernel->malloc(n_chars,
"win32 command line arg");
n_chars = WideCharToMultiByte(CP_UTF8, 0, wargv[i], -1,
argv[i], n_chars, NULL, NULL);
kernel->win32_require("WideCharToMultiByte(1)", n_chars != 0);
@@ -38,14 +40,14 @@ command_line_args : public kernel_owned<command_line_args>
#endif
size_t vec_fill = sizeof(rust_str *) * argc;
size_t vec_alloc = next_power_of_two(sizeof(rust_vec) + vec_fill);
void *mem = kernel->malloc(vec_alloc);
args = new (mem) rust_vec(task->sched, vec_alloc, 0, NULL);
void *mem = kernel->malloc(vec_alloc, "command line");
args = new (mem) rust_vec(vec_alloc, 0, NULL);
rust_str **strs = (rust_str**) &args->data[0];
for (int i = 0; i < argc; ++i) {
size_t str_fill = strlen(argv[i]) + 1;
size_t str_alloc = next_power_of_two(sizeof(rust_str) + str_fill);
mem = kernel->malloc(str_alloc);
strs[i] = new (mem) rust_str(task->sched, str_alloc, str_fill,
mem = kernel->malloc(str_alloc, "command line arg");
strs[i] = new (mem) rust_str(str_alloc, str_fill,
(uint8_t const *)argv[i]);
}
args->fill = vec_fill;
@@ -106,7 +108,8 @@ rust_start(uintptr_t main_fn, int argc, char **argv, void* crate_map) {
kernel->start();
rust_scheduler *sched = kernel->get_scheduler();
command_line_args *args
= new (kernel) command_line_args(sched->root_task, argc, argv);
= new (kernel, "main command line args")
command_line_args(sched->root_task, argc, argv);

DLOG(sched, dom, "startup: %d args in 0x%" PRIxPTR,
args->argc, (uintptr_t)args->args);

@@ -9,7 +9,6 @@

extern "C" CDECL rust_str*
last_os_error(rust_task *task) {
rust_scheduler *sched = task->sched;
LOG(task, task, "last_os_error()");

#if defined(__WIN32__)
@@ -42,12 +41,12 @@ last_os_error(rust_task *task) {
#endif
size_t fill = strlen(buf) + 1;
size_t alloc = next_power_of_two(sizeof(rust_str) + fill);
void *mem = task->malloc(alloc);
void *mem = task->malloc(alloc, "rust_str(last_os_error)");
if (!mem) {
task->fail();
return NULL;
}
rust_str *st = new (mem) rust_str(sched, alloc, fill,
rust_str *st = new (mem) rust_str(alloc, fill,
(const uint8_t *)buf);

#ifdef __WIN32__
@@ -58,7 +57,6 @@ last_os_error(rust_task *task) {

extern "C" CDECL rust_str *
rust_getcwd(rust_task *task) {
rust_scheduler *sched = task->sched;
LOG(task, task, "rust_getcwd()");

char cbuf[BUF_BYTES];
@@ -74,14 +72,14 @@ rust_getcwd(rust_task *task) {

size_t fill = strlen(cbuf) + 1;
size_t alloc = next_power_of_two(sizeof(rust_str) + fill);
void *mem = task->malloc(alloc);
void *mem = task->malloc(alloc, "rust_str(getcwd)");
if (!mem) {
task->fail();
return NULL;
}

rust_str *st;
st = new (mem) rust_str(sched, alloc, fill, (const uint8_t *)cbuf);
st = new (mem) rust_str(alloc, fill, (const uint8_t *)cbuf);

return st;
}
@@ -125,17 +123,16 @@ unsupervise(rust_task *task) {
extern "C" CDECL rust_vec*
vec_alloc(rust_task *task, type_desc *t, type_desc *elem_t, size_t n_elts)
{
rust_scheduler *sched = task->sched;
LOG(task, mem, "vec_alloc %" PRIdPTR " elements of size %" PRIdPTR,
n_elts, elem_t->size);
size_t fill = n_elts * elem_t->size;
size_t alloc = next_power_of_two(sizeof(rust_vec) + fill);
void *mem = task->malloc(alloc, t->is_stateful ? t : NULL);
void *mem = task->malloc(alloc, "rust_vec", t->is_stateful ? t : NULL);
if (!mem) {
task->fail();
return NULL;
}
rust_vec *vec = new (mem) rust_vec(sched, alloc, 0, NULL);
rust_vec *vec = new (mem) rust_vec(alloc, 0, NULL);
return vec;
}

@@ -199,11 +196,10 @@ vec_alloc_with_data(rust_task *task,
size_t elt_size,
void *d)
{
rust_scheduler *sched = task->sched;
size_t alloc = next_power_of_two(sizeof(rust_vec) + (n_elts * elt_size));
void *mem = task->malloc(alloc);
void *mem = task->malloc(alloc, "rust_vec (with data)");
if (!mem) return NULL;
return new (mem) rust_vec(sched, alloc, fill * elt_size, (uint8_t*)d);
return new (mem) rust_vec(alloc, fill * elt_size, (uint8_t*)d);
}

extern "C" CDECL rust_vec*
@@ -377,7 +373,7 @@ extern "C" CDECL void *
rand_new(rust_task *task)
{
rust_scheduler *sched = task->sched;
randctx *rctx = (randctx *) task->malloc(sizeof(randctx));
randctx *rctx = (randctx *) task->malloc(sizeof(randctx), "randctx");
if (!rctx) {
task->fail();
return NULL;
@@ -619,8 +615,9 @@ rust_list_files_ivec(rust_task *task, rust_str *path) {
sizeof(size_t) // fill
+ sizeof(size_t) // alloc
+ sizeof(rust_str *) * 4; // payload
rust_box *box = (rust_box *)task->malloc(sizeof(rust_box) +
str_ivec_sz);
rust_box *box = (rust_box *)task->malloc(sizeof(rust_box) + str_ivec_sz,
"rust_box(list_files_ivec)");

box->ref_count = 1;
rust_ivec *iv = (rust_ivec *)&box->data;
iv->fill = 0;
@@ -628,7 +625,7 @@ rust_list_files_ivec(rust_task *task, rust_str *path) {
size_t alloc_sz = sizeof(rust_str *) * strings.size();
iv->alloc = alloc_sz;
iv->payload.ptr = (rust_ivec_heap *)
task->kernel->malloc(alloc_sz + sizeof(size_t));
task->kernel->malloc(alloc_sz + sizeof(size_t), "files ivec");
iv->payload.ptr->fill = alloc_sz;
memcpy(&iv->payload.ptr->data, strings.data(), alloc_sz);
return box;
@@ -706,7 +703,8 @@ ivec_reserve(rust_task *task, type_desc *ty, rust_ivec *v, size_t n_elems)
if (v->fill || !v->payload.ptr) {
// On stack; spill to heap.
heap_part = (rust_ivec_heap *)task->malloc(new_alloc +
sizeof(size_t));
sizeof(size_t),
"ivec reserve heap part");
heap_part->fill = v->fill;
memcpy(&heap_part->data, v->payload.data, v->fill);

@@ -737,8 +735,9 @@ ivec_reserve_shared(rust_task *task, type_desc *ty, rust_ivec *v,
rust_ivec_heap *heap_part;
if (v->fill || !v->payload.ptr) {
// On stack; spill to heap.
heap_part = (rust_ivec_heap *)task->kernel->malloc(new_alloc +
sizeof(size_t));
heap_part = (rust_ivec_heap *)
task->kernel->malloc(new_alloc + sizeof(size_t),
"ivec reserve shared");
heap_part->fill = v->fill;
memcpy(&heap_part->data, v->payload.data, v->fill);

@@ -11,22 +11,21 @@ rust_chan::rust_chan(rust_task *task,
kernel(task->kernel),
task(task),
port(port),
buffer(task, unit_sz) {
++task->ref_count;
buffer(kernel, unit_sz) {
if (port) {
associate(port);
}
LOG(task, comm, "new rust_chan(task=0x%" PRIxPTR
DLOG(kernel->sched, comm, "new rust_chan(task=0x%" PRIxPTR
", port=0x%" PRIxPTR ") -> chan=0x%" PRIxPTR,
(uintptr_t) task, (uintptr_t) port, (uintptr_t) this);
}

rust_chan::~rust_chan() {
LOG(task, comm, "del rust_chan(task=0x%" PRIxPTR ")", (uintptr_t) this);
DLOG(kernel->sched, comm, "del rust_chan(task=0x%" PRIxPTR ")",
(uintptr_t) this);

A(task->sched, is_associated() == false,
A(kernel->sched, is_associated() == false,
"Channel must be disassociated before being freed.");
--task->ref_count;
}

/**
@@ -35,10 +34,11 @@ rust_chan::~rust_chan() {
void rust_chan::associate(maybe_proxy<rust_port> *port) {
this->port = port;
if (port->is_proxy() == false) {
LOG(task, task,
DLOG(kernel->sched, task,
"associating chan: 0x%" PRIxPTR " with port: 0x%" PRIxPTR,
this, port);
++this->ref_count;
this->task = port->referent()->task;
this->port->referent()->chans.push(this);
}
}
@@ -51,14 +51,15 @@ bool rust_chan::is_associated() {
* Unlink this channel from its associated port.
*/
void rust_chan::disassociate() {
A(task->sched, is_associated(),
A(kernel->sched, is_associated(),
"Channel must be associated with a port.");

if (port->is_proxy() == false) {
LOG(task, task,
DLOG(kernel->sched, task,
"disassociating chan: 0x%" PRIxPTR " from port: 0x%" PRIxPTR,
this, port->referent());
--this->ref_count;
this->task = NULL;
port->referent()->chans.swap_delete(this);
}

@@ -72,7 +73,7 @@ void rust_chan::disassociate() {
void rust_chan::send(void *sptr) {
buffer.enqueue(sptr);

rust_scheduler *sched = task->sched;
rust_scheduler *sched = kernel->sched;
if (!is_associated()) {
W(sched, is_associated(),
"rust_chan::transmit with no associated port.");
@@ -112,11 +113,12 @@ rust_chan *rust_chan::clone(maybe_proxy<rust_task> *target) {
rust_handle<rust_port> *handle =
task->sched->kernel->get_port_handle(port->as_referent());
maybe_proxy<rust_port> *proxy = new rust_proxy<rust_port> (handle);
LOG(task, mem, "new proxy: " PTR, proxy);
DLOG(kernel->sched, mem, "new proxy: " PTR, proxy);
port = proxy;
target_task = target->as_proxy()->handle()->referent();
}
return new (target_task->kernel) rust_chan(target_task, port, unit_sz);
return new (target_task->kernel, "cloned chan")
rust_chan(target_task, port, unit_sz);
}

/**
@@ -124,7 +126,7 @@ rust_chan *rust_chan::clone(maybe_proxy<rust_task> *target) {
* appear to be live, causing modify-after-free errors.
*/
void rust_chan::destroy() {
A(task->sched, ref_count == 0,
A(kernel->sched, ref_count == 0,
"Channel's ref count should be zero.");

if (is_associated()) {

@@ -10,7 +10,7 @@ public:
~rust_chan();

rust_kernel *kernel;
rust_task *task;
smart_ptr<rust_task> task;
maybe_proxy<rust_port> *port;
size_t idx;
circular_buffer buffer;

@@ -16,7 +16,8 @@ rust_crate_cache::get_type_desc(size_t size,
return td;
}
DLOG(sched, cache, "rust_crate_cache::get_type_desc miss");
td = (type_desc*) sched->kernel->malloc(sizeof(type_desc) + keysz);
td = (type_desc*) sched->kernel->malloc(sizeof(type_desc) + keysz,
"crate cache typedesc");
if (!td)
return NULL;
// By convention, desc 0 is the root descriptor.

@@ -115,21 +115,59 @@ template <typename T> struct rc_base {
};

template <typename T> struct task_owned {
inline void *operator new(size_t size, rust_task *task);
inline void *operator new(size_t size, rust_task *task, const char *tag);

inline void *operator new[](size_t size, rust_task *task);
inline void *operator new[](size_t size, rust_task *task,
const char *tag);

inline void *operator new(size_t size, rust_task &task);
inline void *operator new(size_t size, rust_task &task, const char *tag);

inline void *operator new[](size_t size, rust_task &task);
inline void *operator new[](size_t size, rust_task &task,
const char *tag);

void operator delete(void *ptr) {
((T *)ptr)->task->free(ptr);
}
};

template<class T>
class smart_ptr {
T *p;

smart_ptr(const smart_ptr &sp) : p(sp.p) {
if(p) { p->ref(); }
}

public:
smart_ptr() : p(NULL) {};
smart_ptr(T *p) : p(p) { if(p) { p->ref(); } }

~smart_ptr() {
if(p) {
p->deref();
}
}

T *operator=(T* p) {
if(this->p) {
this->p->deref();
}
if(p) {
p->ref();
}
this->p = p;

return p;
}

T *operator->() const { return p; };

operator T*() const { return p; }
};

template <typename T> struct kernel_owned {
inline void *operator new(size_t size, rust_kernel *kernel);
inline void *operator new(size_t size, rust_kernel *kernel,
const char *tag);

void operator delete(void *ptr) {
((T *)ptr)->kernel->free(ptr);

@@ -8,7 +8,7 @@
} while (0)

rust_kernel::rust_kernel(rust_srv *srv) :
_region(&srv->local_region),
_region(srv, true),
_log(srv, NULL),
_srv(srv),
_interrupt_kernel_loop(FALSE)
@@ -20,10 +20,11 @@ rust_scheduler *
rust_kernel::create_scheduler(const char *name) {
_kernel_lock.lock();
rust_message_queue *message_queue =
new (this) rust_message_queue(_srv, this);
new (this, "rust_message_queue") rust_message_queue(_srv, this);
rust_srv *srv = _srv->clone();
rust_scheduler *sched =
new (this) rust_scheduler(this, message_queue, srv, name);
new (this, "rust_scheduler")
rust_scheduler(this, message_queue, srv, name);
rust_handle<rust_scheduler> *handle = internal_get_sched_handle(sched);
message_queue->associate(handle);
message_queues.append(message_queue);
@@ -51,10 +52,8 @@ rust_handle<rust_scheduler> *
rust_kernel::internal_get_sched_handle(rust_scheduler *sched) {
rust_handle<rust_scheduler> *handle = NULL;
if (_sched_handles.get(sched, &handle) == false) {
handle =
new (this) rust_handle<rust_scheduler>(this,
sched->message_queue,
sched);
handle = new (this, "rust_handle<rust_scheduler")
rust_handle<rust_scheduler>(this, sched->message_queue, sched);
_sched_handles.put(sched, handle);
}
return handle;
@@ -74,9 +73,8 @@ rust_kernel::get_task_handle(rust_task *task) {
rust_handle<rust_task> *handle = NULL;
if (_task_handles.get(task, &handle) == false) {
handle =
new (this) rust_handle<rust_task>(this,
task->sched->message_queue,
task);
new (this, "rust_handle<rust_task>")
rust_handle<rust_task>(this, task->sched->message_queue, task);
_task_handles.put(task, handle);
}
_kernel_lock.unlock();
@@ -88,7 +86,7 @@ rust_kernel::get_port_handle(rust_port *port) {
_kernel_lock.lock();
rust_handle<rust_port> *handle = NULL;
if (_port_handles.get(port, &handle) == false) {
handle = new (this)
handle = new (this, "rust_handle<rust_port>")
rust_handle<rust_port>(this,
port->task->sched->message_queue,
port);
@@ -202,17 +200,17 @@ rust_kernel::~rust_kernel() {
}

void *
rust_kernel::malloc(size_t size) {
return _region->malloc(size);
rust_kernel::malloc(size_t size, const char *tag) {
return _region.malloc(size, tag);
}

void *
rust_kernel::realloc(void *mem, size_t size) {
return _region->realloc(mem, size);
return _region.realloc(mem, size);
}

void rust_kernel::free(void *mem) {
_region->free(mem);
_region.free(mem);
}

template<class T> void

@@ -43,7 +43,7 @@ class rust_task_thread;
* threads.
*/
class rust_kernel : public rust_thread {
memory_region *_region;
memory_region _region;
rust_log _log;
rust_srv *_srv;

@@ -109,7 +109,7 @@ public:
void fatal(char const *fmt, ...);
virtual ~rust_kernel();

void *malloc(size_t size);
void *malloc(size_t size, const char *tag);
void *realloc(void *mem, size_t size);
void free(void *mem);

@@ -47,7 +47,8 @@ send(notification_type type, const char* label,
rust_handle<rust_task> *source, rust_handle<rust_task> *target) {
memory_region *region = &target->message_queue->region;
notify_message *message =
new (region) notify_message(region, type, label, source, target);
new (region, "notify_message")
notify_message(region, type, label, source, target);
target->message_queue->enqueue(message);
}

@@ -91,8 +92,8 @@ send(uint8_t *buffer, size_t buffer_sz, const char* label,

memory_region *region = &port->message_queue->region;
data_message *message =
new (region) data_message(region, buffer, buffer_sz, label, source,
port);
new (region, "data_message")
data_message(region, buffer, buffer_sz, label, source, port);
LOG(source->referent(), comm, "==> sending \"%s\"" PTR " in queue " PTR,
label, message, &port->message_queue);
port->message_queue->enqueue(message);

@@ -10,7 +10,8 @@ rust_port::rust_port(rust_task *task, size_t unit_sz)
PRIxPTR, (uintptr_t)task, unit_sz, (uintptr_t)this);

// Allocate a remote channel, for remote channel data.
remote_channel = new (task->kernel) rust_chan(task, this, unit_sz);
remote_channel = new (task->kernel, "remote chan")
rust_chan(task, this, unit_sz);
}

rust_port::~rust_port() {

@@ -290,7 +290,8 @@ rust_scheduler::get_cache() {
rust_task *
rust_scheduler::create_task(rust_task *spawner, const char *name) {
rust_task *task =
new (this->kernel) rust_task (this, &newborn_tasks, spawner, name);
new (this->kernel, "rust_task")
rust_task (this, &newborn_tasks, spawner, name);
DLOG(this, task, "created task: " PTR ", spawner: %s, name: %s",
task, spawner ? spawner->name : "null", name);
if(spawner) {

@@ -34,7 +34,7 @@ new_stk(rust_task *task, size_t minsz)
if (minsz < min_stk_bytes)
minsz = min_stk_bytes;
size_t sz = sizeof(stk_seg) + minsz;
stk_seg *stk = (stk_seg *)task->malloc(sz);
stk_seg *stk = (stk_seg *)task->malloc(sz, "stack");
LOGPTR(task->sched, "new stk", (uintptr_t)stk);
memset(stk, 0, sizeof(stk_seg));
stk->limit = (uintptr_t) &stk->data[minsz];
@@ -326,7 +326,7 @@ rust_task::unlink_gc(gc_alloc *gcm) {
}

void *
rust_task::malloc(size_t sz, type_desc *td)
rust_task::malloc(size_t sz, const char *tag, type_desc *td)
{
// FIXME: GC is disabled for now.
// GC-memory classification is all wrong.
@@ -335,7 +335,8 @@ rust_task::malloc(size_t sz, type_desc *td)
if (td) {
sz += sizeof(gc_alloc);
}
void *mem = local_region.malloc(sz);

void *mem = local_region.malloc(sz, tag);
if (!mem)
return mem;
if (td) {
@@ -488,8 +489,8 @@ bool rust_task::can_schedule(int id)
}

void *
rust_task::calloc(size_t size) {
return local_region.calloc(size);
rust_task::calloc(size_t size, const char *tag) {
return local_region.calloc(size, tag);
}

void rust_task::pin() {

@@ -112,7 +112,7 @@ rust_task : public maybe_proxy<rust_task>,

void link_gc(gc_alloc *gcm);
void unlink_gc(gc_alloc *gcm);
void *malloc(size_t sz, type_desc *td=0);
void *malloc(size_t sz, const char *tag, type_desc *td=0);
void *realloc(void *data, size_t sz, bool gc_mem=false);
void free(void *p, bool gc_mem=false);

@@ -157,7 +157,7 @@ rust_task : public maybe_proxy<rust_task>,

bool can_schedule(int worker);

void *calloc(size_t size);
void *calloc(size_t size, const char *tag);

void pin();
void pin(int id);

@@ -104,7 +104,7 @@ upcall_new_port(rust_task *task, size_t unit_sz) {
(uintptr_t) task, task->name, unit_sz);
// take a reference on behalf of the port
task->ref();
return new (task->kernel) rust_port(task, unit_sz);
return new (task->kernel, "rust_port") rust_port(task, unit_sz);
}

extern "C" CDECL void
@@ -129,7 +129,8 @@ upcall_new_chan(rust_task *task, rust_port *port) {
"task=0x%" PRIxPTR " (%s), port=0x%" PRIxPTR ")",
(uintptr_t) task, task->name, port);
I(sched, port);
return new (task->kernel) rust_chan(task, port, port->unit_sz);
return new (task->kernel, "rust_chan")
rust_chan(task, port, port->unit_sz);
}

/**
@@ -152,8 +153,6 @@ extern "C" CDECL
void upcall_del_chan(rust_task *task, rust_chan *chan) {
LOG_UPCALL_ENTRY(task);

I(task->sched, chan->task == task);

LOG(task, comm, "upcall del_chan(0x%" PRIxPTR ")", (uintptr_t) chan);
chan->destroy();
}
@@ -169,6 +168,14 @@ upcall_clone_chan(rust_task *task, maybe_proxy<rust_task> *target,
return chan->clone(target);
}

extern "C" CDECL rust_task *
upcall_chan_target_task(rust_task *task, rust_chan *chan) {
LOG_UPCALL_ENTRY(task);
I(task->sched, !chan->port->is_proxy());

return chan->port->referent()->task;
}

extern "C" CDECL void
upcall_yield(rust_task *task) {
LOG_UPCALL_ENTRY(task);
@@ -277,7 +284,10 @@ upcall_malloc(rust_task *task, size_t nbytes, type_desc *td) {
" with gc-chain head = 0x%" PRIxPTR,
nbytes, td, task->gc_alloc_chain);

void *p = task->malloc(nbytes, td);
// TODO: Maybe use dladdr here to find a more useful name for the
// type_desc.

void *p = task->malloc(nbytes, "tdesc", td);

LOG(task, mem,
"upcall malloc(%" PRIdPTR ", 0x%" PRIxPTR
@@ -308,7 +318,7 @@ upcall_shared_malloc(rust_task *task, size_t nbytes, type_desc *td) {
LOG(task, mem,
"upcall shared_malloc(%" PRIdPTR ", 0x%" PRIxPTR ")",
nbytes, td);
void *p = task->kernel->malloc(nbytes);
void *p = task->kernel->malloc(nbytes, "shared malloc");
LOG(task, mem,
"upcall shared_malloc(%" PRIdPTR ", 0x%" PRIxPTR
") = 0x%" PRIxPTR,
@@ -346,14 +356,13 @@ upcall_mark(rust_task *task, void* ptr) {
}

rust_str *make_str(rust_task *task, char const *s, size_t fill) {
rust_scheduler *sched = task->sched;
size_t alloc = next_power_of_two(sizeof(rust_str) + fill);
void *mem = task->malloc(alloc);
void *mem = task->malloc(alloc, "rust_str (make_str)");
if (!mem) {
task->fail();
return NULL;
}
rust_str *st = new (mem) rust_str(sched, alloc, fill,
rust_str *st = new (mem) rust_str(alloc, fill,
(uint8_t const *) s);
LOG(task, mem,
"upcall new_str('%s', %" PRIdPTR ") = 0x%" PRIxPTR,
@@ -381,12 +390,12 @@ upcall_new_vec(rust_task *task, size_t fill, type_desc *td) {
rust_scheduler *sched = task->sched;
DLOG(sched, mem, "upcall new_vec(%" PRIdPTR ")", fill);
size_t alloc = next_power_of_two(sizeof(rust_vec) + fill);
void *mem = task->malloc(alloc, td);
void *mem = task->malloc(alloc, "rust_vec (upcall_new_vec)", td);
if (!mem) {
task->fail();
return NULL;
}
rust_vec *v = new (mem) rust_vec(sched, alloc, 0, NULL);
rust_vec *v = new (mem) rust_vec(alloc, 0, NULL);
LOG(task, mem,
"upcall new_vec(%" PRIdPTR ") = 0x%" PRIxPTR, fill, v);
return v;
@@ -441,7 +450,7 @@ vec_grow(rust_task *task,
* that we need the copies performed for us.
*/
LOG(task, mem, "new vec path");
void *mem = task->malloc(alloc, td);
void *mem = task->malloc(alloc, "rust_vec (vec_grow)", td);
if (!mem) {
task->fail();
return NULL;
@@ -450,7 +459,7 @@ vec_grow(rust_task *task,
if (v->ref_count != CONST_REFCOUNT)
v->deref();

v = new (mem) rust_vec(sched, alloc, 0, NULL);
v = new (mem) rust_vec(alloc, 0, NULL);
*need_copy = 1;
}
I(sched, sizeof(rust_vec) + v->fill <= v->alloc);
@@ -599,7 +608,8 @@ upcall_ivec_spill_shared(rust_task *task,
size_t new_alloc = next_power_of_two(newsz);

rust_ivec_heap *heap_part = (rust_ivec_heap *)
task->kernel->malloc(new_alloc + sizeof(size_t));
task->kernel->malloc(new_alloc + sizeof(size_t),
"ivec spill shared");
heap_part->fill = newsz;
memcpy(&heap_part->data, v->payload.data, v->fill);

@@ -23,7 +23,7 @@ ptr_vec<T>::ptr_vec(rust_task *task) :
task(task),
alloc(INIT_SIZE),
fill(0),
data(new (task) T*[alloc])
data(new (task, "ptr_vec<T>") T*[alloc])
{
I(task->sched, data);
DLOG(task->sched, mem, "new ptr_vec(data=0x%" PRIxPTR ") -> 0x%" PRIxPTR,
@@ -175,7 +175,7 @@ rust_vec : public rc_base<rust_vec>
size_t fill;
size_t pad; // Pad to align data[0] to 16 bytes.
uint8_t data[];
rust_vec(rust_scheduler *sched, size_t alloc, size_t fill,
rust_vec(size_t alloc, size_t fill,
uint8_t const *d)
: alloc(alloc),
fill(fill)

@@ -53,6 +53,7 @@ task_yield
task_join
unsafe_vec_to_mut
unsupervise
upcall_chan_target_task
upcall_clone_chan
upcall_del_chan
upcall_del_port

@@ -6,17 +6,30 @@ use std;
import std::task;

fn start(chan[chan[str]] c) {
let port[str] p = port();
let port[str] p;

p = port();
c <| chan(p);
auto a; p |> a;
// auto b; p |> b; // Never read the second string.
p |> a;
log_err a;
p |> b;
log_err b;

auto a;
auto b;
}

fn main() {
let port[chan[str]] p = port();
auto child = spawn start(chan(p));
auto c; p |> c;
let port[chan[str]] p;
auto child;

p = port();
child = spawn start(chan(p));

p |> c;
c <| "A";
c <| "B";
task::yield();

auto c;
}