rt: Change the way the kernel exits to avoid pthread leaks

This makes the kernel join every scheduler thread before exiting in order to
ensure that all threads are completely terminated before the process exits. On
my machine, for 32-bit targets, this was causing regular valgrind errors.
This commit is contained in:
Brian Anderson 2012-02-27 13:36:54 -08:00
parent e4c027446e
commit b3f77bf927
5 changed files with 42 additions and 26 deletions

View File

@ -20,7 +20,6 @@ rust_kernel::rust_kernel(rust_srv *srv) :
live_tasks(0), live_tasks(0),
max_task_id(0), max_task_id(0),
rval(0), rval(0),
live_schedulers(0),
max_sched_id(0), max_sched_id(0),
env(srv->env) env(srv->env)
{ {
@ -75,7 +74,6 @@ rust_kernel::create_scheduler(size_t num_threads) {
bool is_new = sched_table bool is_new = sched_table
.insert(std::pair<rust_sched_id, rust_scheduler*>(id, sched)).second; .insert(std::pair<rust_sched_id, rust_scheduler*>(id, sched)).second;
A(this, is_new, "Reusing a sched id?"); A(this, is_new, "Reusing a sched id?");
live_schedulers++;
} }
sched->start_task_threads(); sched->start_task_threads();
return id; return id;
@ -97,26 +95,38 @@ void
rust_kernel::release_scheduler_id(rust_sched_id id) { rust_kernel::release_scheduler_id(rust_sched_id id) {
I(this, !sched_lock.lock_held_by_current_thread()); I(this, !sched_lock.lock_held_by_current_thread());
scoped_lock with(sched_lock); scoped_lock with(sched_lock);
sched_map::iterator iter = sched_table.find(id); // This list will most likely only ever have a single element in it, but
I(this, iter != sched_table.end()); // it's an actual list because we could potentially get here multiple
rust_scheduler *sched = iter->second; // times before the main thread ever calls wait_for_schedulers()
sched_table.erase(iter); join_list.push_back(id);
delete sched; sched_lock.signal();
live_schedulers--;
if (live_schedulers == 0) {
// We're all done. Tell the main thread to continue
sched_lock.signal();
}
} }
/*
Called on the main thread to wait for the kernel to exit. This function is
also used to join every terminating scheduler thread, so that we can be
sure they have completely exited before the process exits. If we don't join
them then we can see valgrind errors due to un-freed pthread memory.
*/
int int
rust_kernel::wait_for_schedulers() rust_kernel::wait_for_schedulers()
{ {
I(this, !sched_lock.lock_held_by_current_thread()); I(this, !sched_lock.lock_held_by_current_thread());
scoped_lock with(sched_lock); scoped_lock with(sched_lock);
// Schedulers could possibly have already exited while (!sched_table.empty()) {
if (live_schedulers != 0) { while (!join_list.empty()) {
sched_lock.wait(); rust_sched_id id = join_list.back();
join_list.pop_back();
sched_map::iterator iter = sched_table.find(id);
I(this, iter != sched_table.end());
rust_scheduler *sched = iter->second;
sched_table.erase(iter);
sched->join_task_threads();
delete sched;
}
if (!sched_table.empty()) {
sched_lock.wait();
}
} }
return rval; return rval;
} }

View File

@ -3,6 +3,7 @@
#define RUST_KERNEL_H #define RUST_KERNEL_H
#include <map> #include <map>
#include <vector>
#include "memory_region.h" #include "memory_region.h"
#include "rust_log.h" #include "rust_log.h"
@ -36,14 +37,15 @@ private:
lock_and_signal rval_lock; lock_and_signal rval_lock;
int rval; int rval;
// Protects live_schedulers, max_sched_id and sched_table // Protects max_sched_id and sched_table, join_list
lock_and_signal sched_lock; lock_and_signal sched_lock;
// Tracks the number of schedulers currently running. // The next scheduler id
// When this hits 0 we will signal the sched_lock and the
// kernel will terminate.
uintptr_t live_schedulers;
rust_sched_id max_sched_id; rust_sched_id max_sched_id;
// A map from scheduler ids to schedulers. When this is empty
// the kernel terminates
sched_map sched_table; sched_map sched_table;
// A list of scheduler ids that are ready to exit
std::vector<rust_sched_id> join_list;
public: public:

View File

@ -59,17 +59,21 @@ rust_scheduler::destroy_task_threads() {
void void
rust_scheduler::start_task_threads() rust_scheduler::start_task_threads()
{ {
// Copy num_threads because it's possible for the last thread
// to terminate and have the kernel delete us before we
// hit the last check against num_threads, in which case
// we would be accessing invalid memory.
uintptr_t num_threads = this->num_threads;
for(size_t i = 0; i < num_threads; ++i) { for(size_t i = 0; i < num_threads; ++i) {
rust_task_thread *thread = threads[i]; rust_task_thread *thread = threads[i];
thread->start(); thread->start();
} }
} }
// Join every task thread owned by this scheduler so the underlying
// OS threads are fully terminated before the process exits; per the
// commit message, skipping the join leaked pthread resources and
// triggered valgrind errors on 32-bit targets. Called from the main
// thread (see rust_kernel::wait_for_schedulers above).
void
rust_scheduler::join_task_threads()
{
for(size_t i = 0; i < num_threads; ++i) {
rust_task_thread *thread = threads[i];
thread->join();
}
}
void void
rust_scheduler::kill_all_tasks() { rust_scheduler::kill_all_tasks() {
for(size_t i = 0; i < num_threads; ++i) { for(size_t i = 0; i < num_threads; ++i) {

View File

@ -37,6 +37,7 @@ public:
~rust_scheduler(); ~rust_scheduler();
void start_task_threads(); void start_task_threads();
void join_task_threads();
void kill_all_tasks(); void kill_all_tasks();
rust_task_id create_task(rust_task *spawner, rust_task_id create_task(rust_task *spawner,
const char *name, const char *name,

View File

@ -320,7 +320,6 @@ rust_task_thread::create_task(rust_task *spawner, const char *name,
void rust_task_thread::run() { void rust_task_thread::run() {
this->start_main_loop(); this->start_main_loop();
detach();
sched->release_task_thread(); sched->release_task_thread();
} }