summaryrefslogtreecommitdiff
path: root/src/mixed_arena.h
diff options
context:
space:
mode:
authorAlon Zakai <alonzakai@gmail.com>2016-05-07 17:04:14 -0700
committerAlon Zakai <alonzakai@gmail.com>2016-05-07 17:04:14 -0700
commit59bf80e9844822698d66ec5cdb8fce963142b7ab (patch)
tree13d80d9ca2c46f5341332a8f1b7568246e04ee94 /src/mixed_arena.h
parentbd90eaea7daae8c57d611becd897b8b5ed395fe5 (diff)
downloadbinaryen-59bf80e9844822698d66ec5cdb8fce963142b7ab.tar.gz
binaryen-59bf80e9844822698d66ec5cdb8fce963142b7ab.tar.bz2
binaryen-59bf80e9844822698d66ec5cdb8fce963142b7ab.zip
use atomics on MixedArena list generation instead of locking (#456)
Diffstat (limited to 'src/mixed_arena.h')
-rw-r--r--src/mixed_arena.h46
1 file changed, 31 insertions, 15 deletions
diff --git a/src/mixed_arena.h b/src/mixed_arena.h
index 930bf3c0e..77aa052c1 100644
--- a/src/mixed_arena.h
+++ b/src/mixed_arena.h
@@ -61,32 +61,48 @@ struct MixedArena {
size_t chunkSize = 32768;
size_t index; // in last chunk
+ std::thread::id threadId;
+
// multithreaded allocation - each arena is valid on a specific thread.
- // if we are on the wrong thread, we safely look in the linked
+ // if we are on the wrong thread, we atomically look in the linked
// list of next, adding an allocator if necessary
- // TODO: we don't really need locking here, atomics could suffice
- std::thread::id threadId;
- std::mutex mutex;
- MixedArena* next;
+ std::atomic<MixedArena*> next;
MixedArena() {
threadId = std::this_thread::get_id();
- next = nullptr;
+ next.store(nullptr);
}
void* allocSpace(size_t size) {
// the bump allocator data should not be modified by multiple threads at once.
- if (std::this_thread::get_id() != threadId) {
- // TODO use a fast double-checked locking pattern.
- std::lock_guard<std::mutex> lock(mutex);
+ auto myId = std::this_thread::get_id();
+ if (myId != threadId) {
MixedArena* curr = this;
- while (std::this_thread::get_id() != curr->threadId) {
- if (curr->next) {
- curr = curr->next;
- } else {
- curr->next = new MixedArena(); // will have our thread id
+ MixedArena* allocated = nullptr;
+ while (myId != curr->threadId) {
+ auto seen = curr->next.load();
+ if (seen) {
+ curr = seen;
+ continue;
+ }
+ // there is a nullptr for next, so we may be able to place a new
+ // allocator for us there. but carefully, as others may do so as
+ // well. we may waste a few allocations here, but it doesn't matter
+ // as this can only happen as the chain is built up, i.e.,
+      // O(# of cores) per allocator, and our allocators are long-lived.
+ if (!allocated) {
+ allocated = new MixedArena(); // has our thread id
+ }
+ if (curr->next.compare_exchange_weak(seen, allocated)) {
+ // we replaced it, so we are the next in the chain
+ // we can forget about allocated, it is owned by the chain now
+ allocated = nullptr;
+ break;
}
+ // otherwise, the cmpxchg updated seen, and we continue to loop
+ curr = seen;
}
+ if (allocated) delete allocated;
return curr->allocSpace(size);
}
size = (size + 7) & (-8); // same alignment as malloc TODO optimize?
@@ -120,7 +136,7 @@ struct MixedArena {
~MixedArena() {
clear();
- if (next) delete next;
+ if (next.load()) delete next.load();
}
};