1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
|
/*
* Copyright 2015 WebAssembly Community Group participants
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef wasm_mixed_arena_h
#define wasm_mixed_arena_h
#include <atomic>
#include <cassert>
#include <memory>
#include <mutex>
#include <thread>
#include <vector>
//
// Arena allocation for mixed-type data.
//
// Arena-style bump allocation is important for two reasons: First, so that
// allocation is quick, and second, so that allocated items are close together,
// which is cache-friendly. Arena allocation is also useful for a minor third
// reason which is to make freeing all the items in an arena very quick.
//
// Each WebAssembly Module has an arena allocator, which should be used
// for all of its AST nodes and so forth. When the Module is destroyed, the
// entire arena is cleaned up.
//
// When allocating an object in an arena, the object's proper constructor
// is called. Note that destructors are not called, because to make the
// arena simple and fast we do not track internal allocations inside it
// (and we can also avoid the need for virtual destructors).
//
// In general, optimization passes avoid allocation as much as possible.
// Many passes only remove or modify nodes anyhow, others can often
// reuse nodes that are being optimized out. This keeps things
// cache-friendly, and also makes the operations trivially thread-safe.
// In the rare case that a pass does need to allocate, and it is a
// parallel pass (so multiple threads might access the allocator),
// the MixedArena instance will notice if it is on a different thread
// than that arena's original thread, and will perform the allocation
// in a side arena for that other thread. This is done in a transparent
// way to the outside; as a result, it is always safe to allocate using
// a MixedArena, no matter which thread you are on. Allocations will
// of course be fastest on the original thread for the arena.
//
struct MixedArena {
  // Bump-pointer arena allocator. Memory is carved out of fixed-size chunks;
  // nothing is freed until clear()/destruction, and destructors of allocated
  // objects are intentionally never run (see file comment above).
  std::vector<char*> chunks; // owned chunks, freed in clear()/~MixedArena
  size_t index = 0;          // bump offset into chunks.back()

  // Multithreaded allocation: each arena is valid on one specific thread.
  // An allocation from any other thread is transparently redirected to a
  // side arena for that thread, found (or created) in the `next` chain.
  // TODO: we don't really need locking here, atomics could suffice
  std::thread::id threadId;
  std::mutex mutex;
  MixedArena* next = nullptr; // owned; forms a singly linked list of side arenas

  MixedArena() : threadId(std::this_thread::get_id()) {}

  // The arena owns raw chunk memory and the `next` chain; copying or moving
  // would double-free, so forbid both (rule of five).
  MixedArena(const MixedArena&) = delete;
  MixedArena& operator=(const MixedArena&) = delete;

  // Returns `size` bytes of uninitialized, 8-byte-aligned storage.
  // Safe to call from any thread (slower off the arena's own thread).
  // `size` must be smaller than the chunk size.
  void* allocSpace(size_t size) {
    // The bump state must only be touched by the arena's own thread;
    // other threads are redirected to a side arena with a matching id.
    if (std::this_thread::get_id() != threadId) {
      // TODO use a fast double-checked locking pattern.
      std::lock_guard<std::mutex> lock(mutex);
      MixedArena* curr = this;
      while (std::this_thread::get_id() != curr->threadId) {
        if (curr->next) {
          curr = curr->next;
        } else {
          curr->next = new MixedArena(); // will have our thread id
        }
      }
      return curr->allocSpace(size);
    }
    const size_t CHUNK = 10000;
    size = (size + 7) & ~size_t(7); // same alignment as malloc TODO optimize?
    assert(size < CHUNK);
    // `>` (not `>=`): an allocation ending exactly at CHUNK still fits.
    if (chunks.empty() || index + size > CHUNK) {
      chunks.push_back(new char[CHUNK]);
      index = 0;
    }
    auto* ret = chunks.back() + index;
    index += size;
    return static_cast<void*>(ret);
  }

  // Allocate and default-construct a T inside the arena. T's destructor
  // will never be called.
  template<class T>
  T* alloc() {
    auto* ret = static_cast<T*>(allocSpace(sizeof(T)));
    new (ret) T();
    return ret;
  }

  // Free all chunks of this arena (not the side arenas in `next`).
  // Runs no destructors of allocated objects.
  void clear() {
    for (char* chunk : chunks) {
      delete[] chunk;
    }
    chunks.clear();
    index = 0; // keep bump state consistent with the (now empty) chunk list
  }

  ~MixedArena() {
    clear();
    delete next; // recursively frees the whole side-arena chain; ok on nullptr
  }
};
#endif // wasm_mixed_arena_h
|