Diffstat (limited to 'src')
-rw-r--r--  src/abi/stack.h | 3
-rw-r--r--  src/binaryen-c.cpp | 288
-rw-r--r--  src/binaryen-c.h | 84
-rw-r--r--  src/ir/import-utils.h | 14
-rw-r--r--  src/ir/memory-utils.cpp | 5
-rw-r--r--  src/ir/memory-utils.h | 18
-rw-r--r--  src/ir/module-splitting.cpp | 11
-rw-r--r--  src/ir/module-utils.h | 37
-rw-r--r--  src/js/binaryen.js-post.js | 483
-rw-r--r--  src/passes/AlignmentLowering.cpp | 72
-rw-r--r--  src/passes/Asyncify.cpp | 36
-rw-r--r--  src/passes/AvoidReinterprets.cpp | 10
-rw-r--r--  src/passes/I64ToI32Lowering.cpp | 10
-rw-r--r--  src/passes/InstrumentMemory.cpp | 13
-rw-r--r--  src/passes/Memory64Lowering.cpp | 98
-rw-r--r--  src/passes/MemoryPacking.cpp | 36
-rw-r--r--  src/passes/Metrics.cpp | 9
-rw-r--r--  src/passes/OptimizeInstructions.cpp | 100
-rw-r--r--  src/passes/Print.cpp | 37
-rw-r--r--  src/passes/RemoveNonJSOps.cpp | 2
-rw-r--r--  src/passes/RemoveUnusedModuleElements.cpp | 12
-rw-r--r--  src/passes/SafeHeap.cpp | 110
-rw-r--r--  src/passes/SpillPointers.cpp | 5
-rw-r--r--  src/passes/StackCheck.cpp | 6
-rw-r--r--  src/shell-interface.h | 140
-rw-r--r--  src/tools/fuzzing/fuzzing.cpp | 171
-rw-r--r--  src/tools/wasm-ctor-eval.cpp | 100
-rw-r--r--  src/tools/wasm-shell.cpp | 13
-rw-r--r--  src/tools/wasm-split/instrumenter.cpp | 55
-rw-r--r--  src/wasm-binary.h | 19
-rw-r--r--  src/wasm-builder.h | 102
-rw-r--r--  src/wasm-delegations-fields.def | 14
-rw-r--r--  src/wasm-interpreter.h | 413
-rw-r--r--  src/wasm-s-parser.h | 22
-rw-r--r--  src/wasm-stack.h | 5
-rw-r--r--  src/wasm-traversal.h | 4
-rw-r--r--  src/wasm.h | 27
-rw-r--r--  src/wasm/wasm-binary.cpp | 273
-rw-r--r--  src/wasm/wasm-debug.cpp | 3
-rw-r--r--  src/wasm/wasm-s-parser.cpp | 356
-rw-r--r--  src/wasm/wasm-stack.cpp | 46
-rw-r--r--  src/wasm/wasm-validator.cpp | 140
-rw-r--r--  src/wasm/wasm.cpp | 32
-rw-r--r--  src/wasm2js.h | 38
44 files changed, 2287 insertions, 1185 deletions
diff --git a/src/abi/stack.h b/src/abi/stack.h
index cc678b6e8..93a6e4cc1 100644
--- a/src/abi/stack.h
+++ b/src/abi/stack.h
@@ -50,7 +50,8 @@ getStackSpace(Index local, Function* func, Index size, Module& wasm) {
}
// align the size
size = stackAlign(size);
- auto pointerType = wasm.memory.indexType;
+ auto pointerType =
+ !wasm.memories.empty() ? wasm.memories[0]->indexType : Type::i32;
// TODO: find existing stack usage, and add on top of that - carefully
Builder builder(wasm);
auto* block = builder.makeBlock();
diff --git a/src/binaryen-c.cpp b/src/binaryen-c.cpp
index 1dd9fccd2..869f3a92a 100644
--- a/src/binaryen-c.cpp
+++ b/src/binaryen-c.cpp
@@ -1068,14 +1068,20 @@ BinaryenExpressionRef BinaryenLoad(BinaryenModuleRef module,
uint32_t offset,
uint32_t align,
BinaryenType type,
- BinaryenExpressionRef ptr) {
+ BinaryenExpressionRef ptr,
+ const char* name) {
+ // Maintaining compatibility for instructions with a single memory
+ if (name == nullptr && module->memories.size() == 1) {
+ name = module->memories[0]->name.c_str();
+ }
return static_cast<Expression*>(Builder(*(Module*)module)
.makeLoad(bytes,
!!signed_,
offset,
align ? align : bytes,
(Expression*)ptr,
- Type(type)));
+ Type(type),
+ name));
}
BinaryenExpressionRef BinaryenStore(BinaryenModuleRef module,
uint32_t bytes,
@@ -1083,14 +1089,20 @@ BinaryenExpressionRef BinaryenStore(BinaryenModuleRef module,
uint32_t align,
BinaryenExpressionRef ptr,
BinaryenExpressionRef value,
- BinaryenType type) {
+ BinaryenType type,
+ const char* name) {
+ // Maintaining compatibility for instructions with a single memory
+ if (name == nullptr && module->memories.size() == 1) {
+ name = module->memories[0]->name.c_str();
+ }
return static_cast<Expression*>(Builder(*(Module*)module)
.makeStore(bytes,
offset,
align ? align : bytes,
(Expression*)ptr,
(Expression*)value,
- Type(type)));
+ Type(type),
+ name));
}
BinaryenExpressionRef BinaryenConst(BinaryenModuleRef module,
BinaryenLiteral value) {
@@ -1137,13 +1149,24 @@ BinaryenExpressionRef BinaryenReturn(BinaryenModuleRef module,
auto* ret = Builder(*(Module*)module).makeReturn((Expression*)value);
return static_cast<Expression*>(ret);
}
-BinaryenExpressionRef BinaryenMemorySize(BinaryenModuleRef module) {
- auto* ret = Builder(*(Module*)module).makeMemorySize();
+BinaryenExpressionRef BinaryenMemorySize(BinaryenModuleRef module,
+ const char* name) {
+ // Maintaining compatibility for instructions with a single memory
+ if (name == nullptr && module->memories.size() == 1) {
+ name = module->memories[0]->name.c_str();
+ }
+ auto* ret = Builder(*(Module*)module).makeMemorySize(name);
return static_cast<Expression*>(ret);
}
BinaryenExpressionRef BinaryenMemoryGrow(BinaryenModuleRef module,
- BinaryenExpressionRef delta) {
- auto* ret = Builder(*(Module*)module).makeMemoryGrow((Expression*)delta);
+ BinaryenExpressionRef delta,
+ const char* name) {
+ // Maintaining compatibility for instructions with a single memory
+ if (name == nullptr && module->memories.size() == 1) {
+ name = module->memories[0]->name.c_str();
+ }
+ auto* ret =
+ Builder(*(Module*)module).makeMemoryGrow((Expression*)delta, name);
return static_cast<Expression*>(ret);
}
BinaryenExpressionRef BinaryenNop(BinaryenModuleRef module) {
@@ -1156,21 +1179,31 @@ BinaryenExpressionRef BinaryenAtomicLoad(BinaryenModuleRef module,
uint32_t bytes,
uint32_t offset,
BinaryenType type,
- BinaryenExpressionRef ptr) {
+ BinaryenExpressionRef ptr,
+ const char* name) {
+ // Maintaining compatibility for instructions with a single memory
+ if (name == nullptr && module->memories.size() == 1) {
+ name = module->memories[0]->name.c_str();
+ }
return static_cast<Expression*>(
Builder(*(Module*)module)
- .makeAtomicLoad(bytes, offset, (Expression*)ptr, Type(type)));
+ .makeAtomicLoad(bytes, offset, (Expression*)ptr, Type(type), name));
}
BinaryenExpressionRef BinaryenAtomicStore(BinaryenModuleRef module,
uint32_t bytes,
uint32_t offset,
BinaryenExpressionRef ptr,
BinaryenExpressionRef value,
- BinaryenType type) {
+ BinaryenType type,
+ const char* name) {
+ // Maintaining compatibility for instructions with a single memory
+ if (name == nullptr && module->memories.size() == 1) {
+ name = module->memories[0]->name.c_str();
+ }
return static_cast<Expression*>(
Builder(*(Module*)module)
.makeAtomicStore(
- bytes, offset, (Expression*)ptr, (Expression*)value, Type(type)));
+ bytes, offset, (Expression*)ptr, (Expression*)value, Type(type), name));
}
BinaryenExpressionRef BinaryenAtomicRMW(BinaryenModuleRef module,
BinaryenOp op,
@@ -1178,14 +1211,20 @@ BinaryenExpressionRef BinaryenAtomicRMW(BinaryenModuleRef module,
BinaryenIndex offset,
BinaryenExpressionRef ptr,
BinaryenExpressionRef value,
- BinaryenType type) {
+ BinaryenType type,
+ const char* name) {
+ // Maintaining compatibility for instructions with a single memory
+ if (name == nullptr && module->memories.size() == 1) {
+ name = module->memories[0]->name.c_str();
+ }
return static_cast<Expression*>(Builder(*(Module*)module)
.makeAtomicRMW(AtomicRMWOp(op),
bytes,
offset,
(Expression*)ptr,
(Expression*)value,
- Type(type)));
+ Type(type),
+ name));
}
BinaryenExpressionRef BinaryenAtomicCmpxchg(BinaryenModuleRef module,
BinaryenIndex bytes,
@@ -1193,33 +1232,50 @@ BinaryenExpressionRef BinaryenAtomicCmpxchg(BinaryenModuleRef module,
BinaryenExpressionRef ptr,
BinaryenExpressionRef expected,
BinaryenExpressionRef replacement,
- BinaryenType type) {
+ BinaryenType type,
+ const char* name) {
+ // Maintaining compatibility for instructions with a single memory
+ if (name == nullptr && module->memories.size() == 1) {
+ name = module->memories[0]->name.c_str();
+ }
return static_cast<Expression*>(Builder(*(Module*)module)
.makeAtomicCmpxchg(bytes,
offset,
(Expression*)ptr,
(Expression*)expected,
(Expression*)replacement,
- Type(type)));
+ Type(type),
+ name));
}
BinaryenExpressionRef BinaryenAtomicWait(BinaryenModuleRef module,
BinaryenExpressionRef ptr,
BinaryenExpressionRef expected,
BinaryenExpressionRef timeout,
- BinaryenType expectedType) {
+ BinaryenType expectedType,
+ const char* name) {
+ // Maintaining compatibility for instructions with a single memory
+ if (name == nullptr && module->memories.size() == 1) {
+ name = module->memories[0]->name.c_str();
+ }
return static_cast<Expression*>(Builder(*(Module*)module)
.makeAtomicWait((Expression*)ptr,
(Expression*)expected,
(Expression*)timeout,
Type(expectedType),
- 0));
+ 0,
+ name));
}
BinaryenExpressionRef BinaryenAtomicNotify(BinaryenModuleRef module,
BinaryenExpressionRef ptr,
- BinaryenExpressionRef notifyCount) {
+ BinaryenExpressionRef notifyCount,
+ const char* name) {
+ // Maintaining compatibility for instructions with a single memory
+ if (name == nullptr && module->memories.size() == 1) {
+ name = module->memories[0]->name.c_str();
+ }
return static_cast<Expression*>(
Builder(*(Module*)module)
- .makeAtomicNotify((Expression*)ptr, (Expression*)notifyCount, 0));
+ .makeAtomicNotify((Expression*)ptr, (Expression*)notifyCount, 0, name));
}
BinaryenExpressionRef BinaryenAtomicFence(BinaryenModuleRef module) {
return static_cast<Expression*>(Builder(*(Module*)module).makeAtomicFence());
@@ -1275,11 +1331,18 @@ BinaryenExpressionRef BinaryenSIMDLoad(BinaryenModuleRef module,
BinaryenOp op,
uint32_t offset,
uint32_t align,
- BinaryenExpressionRef ptr) {
- return static_cast<Expression*>(
- Builder(*(Module*)module)
- .makeSIMDLoad(
- SIMDLoadOp(op), Address(offset), Address(align), (Expression*)ptr));
+ BinaryenExpressionRef ptr,
+ const char* name) {
+ // Maintaining compatibility for instructions with a single memory
+ if (name == nullptr && module->memories.size() == 1) {
+ name = module->memories[0]->name.c_str();
+ }
+ return static_cast<Expression*>(Builder(*(Module*)module)
+ .makeSIMDLoad(SIMDLoadOp(op),
+ Address(offset),
+ Address(align),
+ (Expression*)ptr,
+ name));
}
BinaryenExpressionRef BinaryenSIMDLoadStoreLane(BinaryenModuleRef module,
BinaryenOp op,
@@ -1287,7 +1350,12 @@ BinaryenExpressionRef BinaryenSIMDLoadStoreLane(BinaryenModuleRef module,
uint32_t align,
uint8_t index,
BinaryenExpressionRef ptr,
- BinaryenExpressionRef vec) {
+ BinaryenExpressionRef vec,
+ const char* name) {
+ // Maintaining compatibility for instructions with a single memory
+ if (name == nullptr && module->memories.size() == 1) {
+ name = module->memories[0]->name.c_str();
+ }
return static_cast<Expression*>(
Builder(*(Module*)module)
.makeSIMDLoadStoreLane(SIMDLoadStoreLaneOp(op),
@@ -1295,17 +1363,25 @@ BinaryenExpressionRef BinaryenSIMDLoadStoreLane(BinaryenModuleRef module,
Address(align),
index,
(Expression*)ptr,
- (Expression*)vec));
+ (Expression*)vec,
+ name));
}
BinaryenExpressionRef BinaryenMemoryInit(BinaryenModuleRef module,
uint32_t segment,
BinaryenExpressionRef dest,
BinaryenExpressionRef offset,
- BinaryenExpressionRef size) {
- return static_cast<Expression*>(
- Builder(*(Module*)module)
- .makeMemoryInit(
- segment, (Expression*)dest, (Expression*)offset, (Expression*)size));
+ BinaryenExpressionRef size,
+ const char* name) {
+ // Maintaining compatibility for instructions with a single memory
+ if (name == nullptr && module->memories.size() == 1) {
+ name = module->memories[0]->name.c_str();
+ }
+ return static_cast<Expression*>(Builder(*(Module*)module)
+ .makeMemoryInit(segment,
+ (Expression*)dest,
+ (Expression*)offset,
+ (Expression*)size,
+ name));
}
BinaryenExpressionRef BinaryenDataDrop(BinaryenModuleRef module,
@@ -1317,21 +1393,36 @@ BinaryenExpressionRef BinaryenDataDrop(BinaryenModuleRef module,
BinaryenExpressionRef BinaryenMemoryCopy(BinaryenModuleRef module,
BinaryenExpressionRef dest,
BinaryenExpressionRef source,
- BinaryenExpressionRef size) {
+ BinaryenExpressionRef size,
+ const char* destMemory,
+ const char* sourceMemory) {
+ // Maintaining compatibility for instructions with a single memory
+ if ((destMemory == nullptr || sourceMemory == nullptr) &&
+ module->memories.size() == 1) {
+ destMemory = module->memories[0]->name.c_str();
+ sourceMemory = module->memories[0]->name.c_str();
+ }
return static_cast<Expression*>(Builder(*(Module*)module)
.makeMemoryCopy((Expression*)dest,
(Expression*)source,
- (Expression*)size));
+ (Expression*)size,
+ destMemory,
+ sourceMemory));
}
BinaryenExpressionRef BinaryenMemoryFill(BinaryenModuleRef module,
BinaryenExpressionRef dest,
BinaryenExpressionRef value,
- BinaryenExpressionRef size) {
- return static_cast<Expression*>(Builder(*(Module*)module)
- .makeMemoryFill((Expression*)dest,
- (Expression*)value,
- (Expression*)size));
+ BinaryenExpressionRef size,
+ const char* name) {
+ // Maintaining compatibility for instructions with a single memory
+ if (name == nullptr && module->memories.size() == 1) {
+ name = module->memories[0]->name.c_str();
+ }
+ return static_cast<Expression*>(
+ Builder(*(Module*)module)
+ .makeMemoryFill(
+ (Expression*)dest, (Expression*)value, (Expression*)size, name));
}
BinaryenExpressionRef BinaryenTupleMake(BinaryenModuleRef module,
@@ -3656,10 +3747,11 @@ void BinaryenAddMemoryImport(BinaryenModuleRef module,
const char* externalModuleName,
const char* externalBaseName,
uint8_t shared) {
- auto& memory = ((Module*)module)->memory;
- memory.module = externalModuleName;
- memory.base = externalBaseName;
- memory.shared = shared;
+ auto memory = Builder::makeMemory(internalName);
+ memory->module = externalModuleName;
+ memory->base = externalBaseName;
+ memory->shared = shared;
+ ((Module*)module)->addMemory(std::move(memory));
}
void BinaryenAddGlobalImport(BinaryenModuleRef module,
const char* internalName,
@@ -3871,7 +3963,7 @@ const char* BinaryenElementSegmentGetData(BinaryenElementSegmentRef elem,
}
}
-// Memory. One per module
+// Memory.
void BinaryenSetMemory(BinaryenModuleRef module,
BinaryenIndex initial,
@@ -3882,28 +3974,35 @@ void BinaryenSetMemory(BinaryenModuleRef module,
BinaryenExpressionRef* segmentOffsets,
BinaryenIndex* segmentSizes,
BinaryenIndex numSegments,
- bool shared) {
- auto* wasm = (Module*)module;
- wasm->memory.initial = initial;
- wasm->memory.max = int32_t(maximum); // Make sure -1 extends.
- wasm->memory.exists = true;
- wasm->memory.shared = shared;
+ bool shared,
+ const char* name) {
+ auto memory = std::make_unique<Memory>();
+ memory->name = name ? name : "0";
+ memory->initial = initial;
+ memory->max = int32_t(maximum); // Make sure -1 extends.
+ memory->shared = shared;
if (exportName) {
auto memoryExport = make_unique<Export>();
memoryExport->name = exportName;
- memoryExport->value = Name::fromInt(0);
+ memoryExport->value = memory->name;
memoryExport->kind = ExternalKind::Memory;
- wasm->addExport(memoryExport.release());
+ ((Module*)module)->addExport(memoryExport.release());
}
+ ((Module*)module)->removeDataSegments([&](DataSegment* curr) {
+ return true;
+ });
for (BinaryenIndex i = 0; i < numSegments; i++) {
auto curr = Builder::makeDataSegment(Name::fromInt(i),
+ memory->name,
segmentPassive[i],
(Expression*)segmentOffsets[i],
segments[i],
segmentSizes[i]);
curr->hasExplicitName = false;
- wasm->dataSegments.push_back(std::move(curr));
+ ((Module*)module)->addDataSegment(std::move(curr));
}
+ ((Module*)module)->removeMemories([&](Memory* curr) { return true; });
+ ((Module*)module)->addMemory(std::move(memory));
}
// Memory segments
@@ -3944,35 +4043,84 @@ uint32_t BinaryenGetMemorySegmentByteOffset(BinaryenModuleRef module,
return 0;
}
bool BinaryenHasMemory(BinaryenModuleRef module) {
- return ((Module*)module)->memory.exists;
+ return !((Module*)module)->memories.empty();
}
-BinaryenIndex BinaryenMemoryGetInitial(BinaryenModuleRef module) {
- return ((Module*)module)->memory.initial;
+BinaryenIndex BinaryenMemoryGetInitial(BinaryenModuleRef module,
+ const char* name) {
+ // Maintaining compatibility for instructions with a single memory
+ if (name == nullptr && module->memories.size() == 1) {
+ name = module->memories[0]->name.c_str();
+ }
+ auto* memory = ((Module*)module)->getMemoryOrNull(name);
+ if (memory == nullptr) {
+ Fatal() << "invalid memory '" << name << "'.";
+ }
+ return memory->initial;
}
-bool BinaryenMemoryHasMax(BinaryenModuleRef module) {
- return ((Module*)module)->memory.hasMax();
+bool BinaryenMemoryHasMax(BinaryenModuleRef module, const char* name) {
+ // Maintaining compatibility for instructions with a single memory
+ if (name == nullptr && module->memories.size() == 1) {
+ name = module->memories[0]->name.c_str();
+ }
+ auto* memory = ((Module*)module)->getMemoryOrNull(name);
+ if (memory == nullptr) {
+ Fatal() << "invalid memory '" << name << "'.";
+ }
+ return memory->hasMax();
}
-BinaryenIndex BinaryenMemoryGetMax(BinaryenModuleRef module) {
- return ((Module*)module)->memory.max;
+BinaryenIndex BinaryenMemoryGetMax(BinaryenModuleRef module, const char* name) {
+ // Maintaining compatibility for instructions with a single memory
+ if (name == nullptr && module->memories.size() == 1) {
+ name = module->memories[0]->name.c_str();
+ }
+ auto* memory = ((Module*)module)->getMemoryOrNull(name);
+ if (memory == nullptr) {
+ Fatal() << "invalid memory '" << name << "'.";
+ }
+ return memory->max;
}
-const char* BinaryenMemoryImportGetModule(BinaryenModuleRef module) {
- auto& memory = ((Module*)module)->memory;
- if (memory.imported()) {
- return memory.module.c_str();
+const char* BinaryenMemoryImportGetModule(BinaryenModuleRef module,
+ const char* name) {
+ // Maintaining compatibility for instructions with a single memory
+ if (name == nullptr && module->memories.size() == 1) {
+ name = module->memories[0]->name.c_str();
+ }
+ auto* memory = ((Module*)module)->getMemoryOrNull(name);
+ if (memory == nullptr) {
+ Fatal() << "invalid memory '" << name << "'.";
+ }
+ if (memory->imported()) {
+ return memory->module.c_str();
} else {
return "";
}
}
-const char* BinaryenMemoryImportGetBase(BinaryenModuleRef module) {
- auto& memory = ((Module*)module)->memory;
- if (memory.imported()) {
- return memory.base.c_str();
+const char* BinaryenMemoryImportGetBase(BinaryenModuleRef module,
+ const char* name) {
+ // Maintaining compatibility for instructions with a single memory
+ if (name == nullptr && module->memories.size() == 1) {
+ name = module->memories[0]->name.c_str();
+ }
+ auto* memory = ((Module*)module)->getMemoryOrNull(name);
+ if (memory == nullptr) {
+ Fatal() << "invalid memory '" << name << "'.";
+ }
+ if (memory->imported()) {
+ return memory->base.c_str();
} else {
return "";
}
}
-bool BinaryenMemoryIsShared(BinaryenModuleRef module) {
- return ((Module*)module)->memory.shared;
+bool BinaryenMemoryIsShared(BinaryenModuleRef module, const char* name) {
+ // Maintaining compatibility for instructions with a single memory
+ if (name == nullptr && module->memories.size() == 1) {
+ name = module->memories[0]->name.c_str();
+ }
+ auto* memory = ((Module*)module)->getMemoryOrNull(name);
+ if (memory == nullptr) {
+ Fatal() << "invalid memory '" << name << "'.";
+ }
+ return memory->shared;
}
size_t BinaryenGetMemorySegmentByteLength(BinaryenModuleRef module,
BinaryenIndex id) {
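
The binaryen-c.cpp changes above all follow one pattern: each memory-touching constructor gains a trailing memory-name argument, and a NULL name falls back to the module's only memory so existing single-memory callers keep working. Below is a minimal C++ sketch of calling the updated API under those assumptions; the memory name "mem0" and the helper function are illustrative and not part of this patch.

#include "binaryen-c.h"

// Build an i32.load from an explicitly named memory, then combine it with
// memory.size obtained through the NULL-name single-memory fallback.
static BinaryenExpressionRef loadPlusMemorySize(BinaryenModuleRef module) {
  BinaryenExpressionRef ptr = BinaryenConst(module, BinaryenLiteralInt32(16));
  BinaryenExpressionRef load =
    BinaryenLoad(module, 4, true, 0, 4, BinaryenTypeInt32(), ptr, "mem0");
  BinaryenExpressionRef size = BinaryenMemorySize(module, NULL);
  return BinaryenBinary(module, BinaryenAddInt32(), load, size);
}
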
diff --git a/src/binaryen-c.h b/src/binaryen-c.h
index f02b2cb19..f5bb71eb9 100644
--- a/src/binaryen-c.h
+++ b/src/binaryen-c.h
@@ -746,7 +746,8 @@ BINARYEN_API BinaryenExpressionRef BinaryenLoad(BinaryenModuleRef module,
uint32_t offset,
uint32_t align,
BinaryenType type,
- BinaryenExpressionRef ptr);
+ BinaryenExpressionRef ptr,
+ const char* name);
// Store: align can be 0, in which case it will be the natural alignment (equal
// to bytes)
BINARYEN_API BinaryenExpressionRef BinaryenStore(BinaryenModuleRef module,
@@ -755,7 +756,8 @@ BINARYEN_API BinaryenExpressionRef BinaryenStore(BinaryenModuleRef module,
uint32_t align,
BinaryenExpressionRef ptr,
BinaryenExpressionRef value,
- BinaryenType type);
+ BinaryenType type,
+ const char* name);
BINARYEN_API BinaryenExpressionRef BinaryenConst(BinaryenModuleRef module,
struct BinaryenLiteral value);
BINARYEN_API BinaryenExpressionRef BinaryenUnary(BinaryenModuleRef module,
@@ -776,25 +778,27 @@ BINARYEN_API BinaryenExpressionRef BinaryenDrop(BinaryenModuleRef module,
// Return: value can be NULL
BINARYEN_API BinaryenExpressionRef BinaryenReturn(BinaryenModuleRef module,
BinaryenExpressionRef value);
-BINARYEN_API BinaryenExpressionRef BinaryenMemorySize(BinaryenModuleRef module);
-BINARYEN_API BinaryenExpressionRef
-BinaryenMemoryGrow(BinaryenModuleRef module, BinaryenExpressionRef delta);
+BINARYEN_API BinaryenExpressionRef BinaryenMemorySize(BinaryenModuleRef module,
+ const char* name);
+BINARYEN_API BinaryenExpressionRef BinaryenMemoryGrow(
+ BinaryenModuleRef module, BinaryenExpressionRef delta, const char* name);
BINARYEN_API BinaryenExpressionRef BinaryenNop(BinaryenModuleRef module);
BINARYEN_API BinaryenExpressionRef
BinaryenUnreachable(BinaryenModuleRef module);
-BINARYEN_API BinaryenExpressionRef
-BinaryenAtomicLoad(BinaryenModuleRef module,
- uint32_t bytes,
- uint32_t offset,
- BinaryenType type,
- BinaryenExpressionRef ptr);
+BINARYEN_API BinaryenExpressionRef BinaryenAtomicLoad(BinaryenModuleRef module,
+ uint32_t bytes,
+ uint32_t offset,
+ BinaryenType type,
+ BinaryenExpressionRef ptr,
+ const char* name);
BINARYEN_API BinaryenExpressionRef
BinaryenAtomicStore(BinaryenModuleRef module,
uint32_t bytes,
uint32_t offset,
BinaryenExpressionRef ptr,
BinaryenExpressionRef value,
- BinaryenType type);
+ BinaryenType type,
+ const char* name);
BINARYEN_API BinaryenExpressionRef
BinaryenAtomicRMW(BinaryenModuleRef module,
BinaryenOp op,
@@ -802,7 +806,8 @@ BinaryenAtomicRMW(BinaryenModuleRef module,
BinaryenIndex offset,
BinaryenExpressionRef ptr,
BinaryenExpressionRef value,
- BinaryenType type);
+ BinaryenType type,
+ const char* name);
BINARYEN_API BinaryenExpressionRef
BinaryenAtomicCmpxchg(BinaryenModuleRef module,
BinaryenIndex bytes,
@@ -810,17 +815,20 @@ BinaryenAtomicCmpxchg(BinaryenModuleRef module,
BinaryenExpressionRef ptr,
BinaryenExpressionRef expected,
BinaryenExpressionRef replacement,
- BinaryenType type);
+ BinaryenType type,
+ const char* name);
BINARYEN_API BinaryenExpressionRef
BinaryenAtomicWait(BinaryenModuleRef module,
BinaryenExpressionRef ptr,
BinaryenExpressionRef expected,
BinaryenExpressionRef timeout,
- BinaryenType type);
+ BinaryenType type,
+ const char* name);
BINARYEN_API BinaryenExpressionRef
BinaryenAtomicNotify(BinaryenModuleRef module,
BinaryenExpressionRef ptr,
- BinaryenExpressionRef notifyCount);
+ BinaryenExpressionRef notifyCount,
+ const char* name);
BINARYEN_API BinaryenExpressionRef
BinaryenAtomicFence(BinaryenModuleRef module);
BINARYEN_API BinaryenExpressionRef
@@ -853,7 +861,8 @@ BINARYEN_API BinaryenExpressionRef BinaryenSIMDLoad(BinaryenModuleRef module,
BinaryenOp op,
uint32_t offset,
uint32_t align,
- BinaryenExpressionRef ptr);
+ BinaryenExpressionRef ptr,
+ const char* name);
BINARYEN_API BinaryenExpressionRef
BinaryenSIMDLoadStoreLane(BinaryenModuleRef module,
BinaryenOp op,
@@ -861,25 +870,30 @@ BinaryenSIMDLoadStoreLane(BinaryenModuleRef module,
uint32_t align,
uint8_t index,
BinaryenExpressionRef ptr,
- BinaryenExpressionRef vec);
+ BinaryenExpressionRef vec,
+ const char* name);
BINARYEN_API BinaryenExpressionRef
BinaryenMemoryInit(BinaryenModuleRef module,
uint32_t segment,
BinaryenExpressionRef dest,
BinaryenExpressionRef offset,
- BinaryenExpressionRef size);
+ BinaryenExpressionRef size,
+ const char* name);
BINARYEN_API BinaryenExpressionRef BinaryenDataDrop(BinaryenModuleRef module,
uint32_t segment);
BINARYEN_API BinaryenExpressionRef
BinaryenMemoryCopy(BinaryenModuleRef module,
BinaryenExpressionRef dest,
BinaryenExpressionRef source,
- BinaryenExpressionRef size);
+ BinaryenExpressionRef size,
+ const char* destMemory,
+ const char* sourceMemory);
BINARYEN_API BinaryenExpressionRef
BinaryenMemoryFill(BinaryenModuleRef module,
BinaryenExpressionRef dest,
BinaryenExpressionRef value,
- BinaryenExpressionRef size);
+ BinaryenExpressionRef size,
+ const char* name);
BINARYEN_API BinaryenExpressionRef BinaryenRefNull(BinaryenModuleRef module,
BinaryenType type);
BINARYEN_API BinaryenExpressionRef BinaryenRefIs(BinaryenModuleRef module,
@@ -2194,6 +2208,9 @@ BINARYEN_API void BinaryenAddTagImport(BinaryenModuleRef module,
BinaryenType params,
BinaryenType results);
+// Memory
+BINARYEN_REF(Memory);
+
// Exports
BINARYEN_REF(Export);
@@ -2310,8 +2327,7 @@ BinaryenGetElementSegment(BinaryenModuleRef module, const char* name);
BINARYEN_API BinaryenElementSegmentRef
BinaryenGetElementSegmentByIndex(BinaryenModuleRef module, BinaryenIndex index);
-// Memory. One per module
-
+// This will create a memory, overwriting any existing memory
// Each memory has data in segments, a start offset in segmentOffsets, and a
// size in segmentSizes. exportName can be NULL
BINARYEN_API void BinaryenSetMemory(BinaryenModuleRef module,
@@ -2323,16 +2339,22 @@ BINARYEN_API void BinaryenSetMemory(BinaryenModuleRef module,
BinaryenExpressionRef* segmentOffsets,
BinaryenIndex* segmentSizes,
BinaryenIndex numSegments,
- bool shared);
+ bool shared,
+ const char* name);
BINARYEN_API bool BinaryenHasMemory(BinaryenModuleRef module);
-BINARYEN_API BinaryenIndex BinaryenMemoryGetInitial(BinaryenModuleRef module);
-BINARYEN_API bool BinaryenMemoryHasMax(BinaryenModuleRef module);
-BINARYEN_API BinaryenIndex BinaryenMemoryGetMax(BinaryenModuleRef module);
-BINARYEN_API const char*
-BinaryenMemoryImportGetModule(BinaryenModuleRef module);
-BINARYEN_API const char* BinaryenMemoryImportGetBase(BinaryenModuleRef module);
-BINARYEN_API bool BinaryenMemoryIsShared(BinaryenModuleRef module);
+BINARYEN_API BinaryenIndex BinaryenMemoryGetInitial(BinaryenModuleRef module,
+ const char* name);
+BINARYEN_API bool BinaryenMemoryHasMax(BinaryenModuleRef module,
+ const char* name);
+BINARYEN_API BinaryenIndex BinaryenMemoryGetMax(BinaryenModuleRef module,
+ const char* name);
+BINARYEN_API const char* BinaryenMemoryImportGetModule(BinaryenModuleRef module,
+ const char* name);
+BINARYEN_API const char* BinaryenMemoryImportGetBase(BinaryenModuleRef module,
+ const char* name);
+BINARYEN_API bool BinaryenMemoryIsShared(BinaryenModuleRef module,
+ const char* name);
// Memory segments. Query utilities.
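
The query side of the header mirrors this: every memory accessor now identifies the memory by name, and the NULL-name shorthand is only valid while the module has exactly one memory (otherwise the lookup in binaryen-c.cpp aborts with a fatal error). A hedged usage sketch follows; describeMemory is a hypothetical helper, not part of the API.

#include <cstdio>
#include "binaryen-c.h"

static void describeMemory(BinaryenModuleRef module, const char* name) {
  if (!BinaryenHasMemory(module)) {
    return; // the module neither defines nor imports a memory
  }
  BinaryenIndex initial = BinaryenMemoryGetInitial(module, name);
  BinaryenIndex max =
    BinaryenMemoryHasMax(module, name) ? BinaryenMemoryGetMax(module, name) : 0;
  bool shared = BinaryenMemoryIsShared(module, name);
  // An empty string means the memory is defined locally, not imported.
  const char* importedFrom = BinaryenMemoryImportGetModule(module, name);
  std::printf("memory %s: initial=%u max=%u shared=%d import=\"%s\"\n",
              name ? name : "(only memory)", initial, max, int(shared),
              importedFrom);
}
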
diff --git a/src/ir/import-utils.h b/src/ir/import-utils.h
index 2e7a4f44c..d0b5a8042 100644
--- a/src/ir/import-utils.h
+++ b/src/ir/import-utils.h
@@ -30,6 +30,7 @@ struct ImportInfo {
std::vector<Global*> importedGlobals;
std::vector<Function*> importedFunctions;
std::vector<Table*> importedTables;
+ std::vector<Memory*> importedMemories;
std::vector<Tag*> importedTags;
ImportInfo(Module& wasm) : wasm(wasm) {
@@ -48,6 +49,11 @@ struct ImportInfo {
importedTables.push_back(import.get());
}
}
+ for (auto& import : wasm.memories) {
+ if (import->imported()) {
+ importedMemories.push_back(import.get());
+ }
+ }
for (auto& import : wasm.tags) {
if (import->imported()) {
importedTags.push_back(import.get());
@@ -88,11 +94,13 @@ struct ImportInfo {
Index getNumImportedTables() { return importedTables.size(); }
+ Index getNumImportedMemories() { return importedMemories.size(); }
+
Index getNumImportedTags() { return importedTags.size(); }
Index getNumImports() {
return getNumImportedGlobals() + getNumImportedFunctions() +
- getNumImportedTags() + (wasm.memory.imported() ? 1 : 0) +
+ getNumImportedTags() + getNumImportedMemories() +
getNumImportedTables();
}
@@ -108,6 +116,10 @@ struct ImportInfo {
return wasm.tables.size() - getNumImportedTables();
}
+ Index getNumDefinedMemories() {
+ return wasm.memories.size() - getNumImportedMemories();
+ }
+
Index getNumDefinedTags() { return wasm.tags.size() - getNumImportedTags(); }
};
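
A small sketch of the extended ImportInfo in use: imported and defined memories are now counted separately, and together they cover wasm.memories. The surrounding function is hypothetical.

#include <cassert>
#include "ir/import-utils.h"
#include "wasm.h"

static void checkMemoryImportCounts(wasm::Module& module) {
  wasm::ImportInfo info(module);
  // Imported plus defined memories always add up to all memories.
  assert(info.getNumImportedMemories() + info.getNumDefinedMemories() ==
         module.memories.size());
}
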
diff --git a/src/ir/memory-utils.cpp b/src/ir/memory-utils.cpp
index 8dc3baeb9..f1471b7a4 100644
--- a/src/ir/memory-utils.cpp
+++ b/src/ir/memory-utils.cpp
@@ -20,6 +20,10 @@
namespace wasm::MemoryUtils {
bool flatten(Module& wasm) {
+ // Flatten does not currently have support for multi-memories
+ if (wasm.memories.size() > 1) {
+ return false;
+ }
// The presence of any MemoryInit instructions is a problem because they care
// about segment identity, which flattening gets rid of (when it merges them
// all into one big segment).
@@ -62,7 +66,6 @@ bool flatten(Module& wasm) {
}
std::copy(segment->data.begin(), segment->data.end(), data.begin() + start);
}
- dataSegments.resize(1);
dataSegments[0]->offset->cast<Const>()->value = Literal(int32_t(0));
dataSegments[0]->data.swap(data);
wasm.removeDataSegments(
diff --git a/src/ir/memory-utils.h b/src/ir/memory-utils.h
index 5e9086ca4..9bdd00258 100644
--- a/src/ir/memory-utils.h
+++ b/src/ir/memory-utils.h
@@ -30,19 +30,25 @@ namespace wasm::MemoryUtils {
// Flattens memory into a single data segment, or no segment. If there is
// a segment, it starts at 0.
// Returns true if successful (e.g. relocatable segments cannot be flattened).
+// Does not yet support multi-memories
bool flatten(Module& wasm);
-// Ensures that the memory exists (of minimal size).
-inline void ensureExists(Memory& memory) {
- if (!memory.exists) {
- memory.exists = true;
- memory.initial = memory.max = 1;
+// Ensures that a memory exists (of minimal size).
+inline void ensureExists(Module* wasm) {
+ if (wasm->memories.empty()) {
+ auto memory = Builder::makeMemory("0");
+ memory->initial = memory->max = 1;
+ wasm->addMemory(std::move(memory));
}
}
// Try to merge segments until they fit into web limitations.
// Return true if successful.
+// Does not yet support multi-memories
inline bool ensureLimitedSegments(Module& module) {
+ if (module.memories.size() > 1) {
+ return false;
+ }
auto& dataSegments = module.dataSegments;
if (dataSegments.size() <= WebLimitations::MaxDataSegments) {
return true;
@@ -136,6 +142,7 @@ inline bool ensureLimitedSegments(Module& module) {
c->type = Type::i32;
auto combined = Builder::makeDataSegment();
+ combined->memory = module.memories[0]->name;
combined->offset = c;
for (Index j = i; j < dataSegments.size(); j++) {
auto& segment = dataSegments[j];
@@ -156,6 +163,7 @@ inline bool ensureLimitedSegments(Module& module) {
}
dataSegments.swap(mergedSegments);
+ module.updateDataSegmentsMap();
return true;
}
} // namespace wasm::MemoryUtils
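
The new ensureExists(Module*) contract above is: if the module has no memories, add a default memory named "0" with initial = max = 1; otherwise leave the module alone. A hedged sketch of how a pass-like caller might rely on that guarantee; the function name is illustrative only.

#include <cassert>
#include "ir/memory-utils.h"
#include "wasm.h"

static void prepareDefaultMemory(wasm::Module& module) {
  // Guarantee there is a memory to address before emitting loads/stores.
  wasm::MemoryUtils::ensureExists(&module);
  assert(!module.memories.empty());
  // Later code can safely refer to module.memories[0]->name, e.g. when
  // building Load/Store expressions against the default memory.
}
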
diff --git a/src/ir/module-splitting.cpp b/src/ir/module-splitting.cpp
index e24dd6452..8294e1575 100644
--- a/src/ir/module-splitting.cpp
+++ b/src/ir/module-splitting.cpp
@@ -612,14 +612,9 @@ void ModuleSplitter::shareImportableItems() {
// TODO: Be more selective by only sharing global items that are actually used
// in the secondary module, just like we do for functions.
- if (primary.memory.exists) {
- secondary.memory.exists = true;
- secondary.memory.initial = primary.memory.initial;
- secondary.memory.max = primary.memory.max;
- secondary.memory.shared = primary.memory.shared;
- secondary.memory.indexType = primary.memory.indexType;
- makeImportExport(
- primary.memory, secondary.memory, "memory", ExternalKind::Memory);
+ for (auto& memory : primary.memories) {
+ auto secondaryMemory = ModuleUtils::copyMemory(memory.get(), secondary);
+ makeImportExport(*memory, *secondaryMemory, "memory", ExternalKind::Memory);
}
for (auto& table : primary.tables) {
diff --git a/src/ir/module-utils.h b/src/ir/module-utils.h
index 4f731748e..81f832b40 100644
--- a/src/ir/module-utils.h
+++ b/src/ir/module-utils.h
@@ -106,10 +106,22 @@ inline Table* copyTable(const Table* table, Module& out) {
return out.addTable(std::move(ret));
}
+inline Memory* copyMemory(const Memory* memory, Module& out) {
+ auto ret = Builder::makeMemory(memory->name);
+ ret->hasExplicitName = memory->hasExplicitName;
+ ret->initial = memory->initial;
+ ret->max = memory->max;
+ ret->shared = memory->shared;
+ ret->indexType = memory->indexType;
+
+ return out.addMemory(std::move(ret));
+}
+
inline DataSegment* copyDataSegment(const DataSegment* segment, Module& out) {
auto ret = Builder::makeDataSegment();
ret->name = segment->name;
ret->hasExplicitName = segment->hasExplicitName;
+ ret->memory = segment->memory;
ret->isPassive = segment->isPassive;
if (!segment->isPassive) {
auto offset = ExpressionManipulator::copy(segment->offset, out);
@@ -141,10 +153,12 @@ inline void copyModule(const Module& in, Module& out) {
for (auto& curr : in.tables) {
copyTable(curr.get(), out);
}
+ for (auto& curr : in.memories) {
+ copyMemory(curr.get(), out);
+ }
for (auto& curr : in.dataSegments) {
copyDataSegment(curr.get(), out);
}
- out.memory = in.memory;
out.start = in.start;
out.userSections = in.userSections;
out.debugInfoFileNames = in.debugInfoFileNames;
@@ -207,14 +221,27 @@ inline void renameFunction(Module& wasm, Name oldName, Name newName) {
// Convenient iteration over imported/non-imported module elements
template<typename T> inline void iterImportedMemories(Module& wasm, T visitor) {
- if (wasm.memory.exists && wasm.memory.imported()) {
- visitor(&wasm.memory);
+ for (auto& import : wasm.memories) {
+ if (import->imported()) {
+ visitor(import.get());
+ }
}
}
template<typename T> inline void iterDefinedMemories(Module& wasm, T visitor) {
- if (wasm.memory.exists && !wasm.memory.imported()) {
- visitor(&wasm.memory);
+ for (auto& import : wasm.memories) {
+ if (!import->imported()) {
+ visitor(import.get());
+ }
+ }
+}
+
+template<typename T>
+inline void iterMemorySegments(Module& wasm, Name memory, T visitor) {
+ for (auto& segment : wasm.dataSegments) {
+ if (!segment->isPassive && segment->memory == memory) {
+ visitor(segment.get());
+ }
}
}
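
The iteration helpers above generalize the old single-memory visitors. A hedged sketch of using them to summarize a module's memories and walk the active segments of its first memory; the function and its output are illustrative only.

#include <iostream>
#include "ir/module-utils.h"
#include "wasm.h"

using namespace wasm;

static void summarizeMemories(Module& wasm) {
  Index imported = 0, defined = 0;
  ModuleUtils::iterImportedMemories(wasm, [&](Memory*) { imported++; });
  ModuleUtils::iterDefinedMemories(wasm, [&](Memory*) { defined++; });
  if (!wasm.memories.empty()) {
    // Visit only the active data segments that target the first memory.
    ModuleUtils::iterMemorySegments(
      wasm, wasm.memories[0]->name, [&](DataSegment* segment) {
        // e.g. inspect segment->offset and segment->data here
      });
  }
  std::cout << "imported memories: " << imported
            << ", defined memories: " << defined << '\n';
}
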
diff --git a/src/js/binaryen.js-post.js b/src/js/binaryen.js-post.js
index 6ba5454e9..53930e16c 100644
--- a/src/js/binaryen.js-post.js
+++ b/src/js/binaryen.js-post.js
@@ -687,30 +687,30 @@ function wrapModule(module, self = {}) {
}
self['memory'] = {
- 'size'() {
- return Module['_BinaryenMemorySize'](module);
+ 'size'(name) {
+ return Module['_BinaryenMemorySize'](module, strToStack(name));
},
- 'grow'(value) {
- return Module['_BinaryenMemoryGrow'](module, value);
+ 'grow'(value, name) {
+ return Module['_BinaryenMemoryGrow'](module, value, strToStack(name));
},
- 'init'(segment, dest, offset, size) {
- return Module['_BinaryenMemoryInit'](module, segment, dest, offset, size);
+ 'init'(segment, dest, offset, size, name) {
+ return Module['_BinaryenMemoryInit'](module, segment, dest, offset, size, strToStack(name));
},
- 'copy'(dest, source, size) {
- return Module['_BinaryenMemoryCopy'](module, dest, source, size);
+ 'copy'(dest, source, size, destMemory, sourceMemory) {
+ return Module['_BinaryenMemoryCopy'](module, dest, source, size, strToStack(destMemory), strToStack(sourceMemory));
},
- 'fill'(dest, value, size) {
- return Module['_BinaryenMemoryFill'](module, dest, value, size);
+ 'fill'(dest, value, size, name) {
+ return Module['_BinaryenMemoryFill'](module, dest, value, size, strToStack(name));
},
'atomic': {
- 'notify'(ptr, notifyCount) {
- return Module['_BinaryenAtomicNotify'](module, ptr, notifyCount);
+ 'notify'(ptr, notifyCount, name) {
+ return Module['_BinaryenAtomicNotify'](module, ptr, notifyCount, strToStack(name));
},
- 'wait32'(ptr, expected, timeout) {
- return Module['_BinaryenAtomicWait'](module, ptr, expected, timeout, Module['i32']);
+ 'wait32'(ptr, expected, timeout, name) {
+ return Module['_BinaryenAtomicWait'](module, ptr, expected, timeout, Module['i32'], strToStack(name));
},
- 'wait64'(ptr, expected, timeout) {
- return Module['_BinaryenAtomicWait'](module, ptr, expected, timeout, Module['i64']);
+ 'wait64'(ptr, expected, timeout, name) {
+ return Module['_BinaryenAtomicWait'](module, ptr, expected, timeout, Module['i64'], strToStack(name));
}
}
}
@@ -722,29 +722,29 @@ function wrapModule(module, self = {}) {
}
self['i32'] = {
- 'load'(offset, align, ptr) {
- return Module['_BinaryenLoad'](module, 4, true, offset, align, Module['i32'], ptr);
+ 'load'(offset, align, ptr, name) {
+ return Module['_BinaryenLoad'](module, 4, true, offset, align, Module['i32'], ptr, strToStack(name));
},
- 'load8_s'(offset, align, ptr) {
- return Module['_BinaryenLoad'](module, 1, true, offset, align, Module['i32'], ptr);
+ 'load8_s'(offset, align, ptr, name) {
+ return Module['_BinaryenLoad'](module, 1, true, offset, align, Module['i32'], ptr, strToStack(name));
},
- 'load8_u'(offset, align, ptr) {
- return Module['_BinaryenLoad'](module, 1, false, offset, align, Module['i32'], ptr);
+ 'load8_u'(offset, align, ptr, name) {
+ return Module['_BinaryenLoad'](module, 1, false, offset, align, Module['i32'], ptr, strToStack(name));
},
- 'load16_s'(offset, align, ptr) {
- return Module['_BinaryenLoad'](module, 2, true, offset, align, Module['i32'], ptr);
+ 'load16_s'(offset, align, ptr, name) {
+ return Module['_BinaryenLoad'](module, 2, true, offset, align, Module['i32'], ptr, strToStack(name));
},
- 'load16_u'(offset, align, ptr) {
- return Module['_BinaryenLoad'](module, 2, false, offset, align, Module['i32'], ptr);
+ 'load16_u'(offset, align, ptr, name) {
+ return Module['_BinaryenLoad'](module, 2, false, offset, align, Module['i32'], ptr, strToStack(name));
},
- 'store'(offset, align, ptr, value) {
- return Module['_BinaryenStore'](module, 4, offset, align, ptr, value, Module['i32']);
+ 'store'(offset, align, ptr, value, name) {
+ return Module['_BinaryenStore'](module, 4, offset, align, ptr, value, Module['i32'], strToStack(name));
},
- 'store8'(offset, align, ptr, value) {
- return Module['_BinaryenStore'](module, 1, offset, align, ptr, value, Module['i32']);
+ 'store8'(offset, align, ptr, value, name) {
+ return Module['_BinaryenStore'](module, 1, offset, align, ptr, value, Module['i32'], strToStack(name));
},
- 'store16'(offset, align, ptr, value) {
- return Module['_BinaryenStore'](module, 2, offset, align, ptr, value, Module['i32']);
+ 'store16'(offset, align, ptr, value, name) {
+ return Module['_BinaryenStore'](module, 2, offset, align, ptr, value, Module['i32'], strToStack(name));
},
'const'(x) {
return preserveStack(() => {
@@ -885,91 +885,91 @@ function wrapModule(module, self = {}) {
return Module['_BinaryenBinary'](module, Module['GeUInt32'], left, right);
},
'atomic': {
- 'load'(offset, ptr) {
- return Module['_BinaryenAtomicLoad'](module, 4, offset, Module['i32'], ptr);
+ 'load'(offset, ptr, name) {
+ return Module['_BinaryenAtomicLoad'](module, 4, offset, Module['i32'], ptr, strToStack(name));
},
- 'load8_u'(offset, ptr) {
- return Module['_BinaryenAtomicLoad'](module, 1, offset, Module['i32'], ptr);
+ 'load8_u'(offset, ptr, name) {
+ return Module['_BinaryenAtomicLoad'](module, 1, offset, Module['i32'], ptr, strToStack(name));
},
- 'load16_u'(offset, ptr) {
- return Module['_BinaryenAtomicLoad'](module, 2, offset, Module['i32'], ptr);
+ 'load16_u'(offset, ptr, name) {
+ return Module['_BinaryenAtomicLoad'](module, 2, offset, Module['i32'], ptr, strToStack(name));
},
- 'store'(offset, ptr, value) {
- return Module['_BinaryenAtomicStore'](module, 4, offset, ptr, value, Module['i32']);
+ 'store'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicStore'](module, 4, offset, ptr, value, Module['i32'], strToStack(name));
},
- 'store8'(offset, ptr, value) {
- return Module['_BinaryenAtomicStore'](module, 1, offset, ptr, value, Module['i32']);
+ 'store8'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicStore'](module, 1, offset, ptr, value, Module['i32'], strToStack(name));
},
- 'store16'(offset, ptr, value) {
- return Module['_BinaryenAtomicStore'](module, 2, offset, ptr, value, Module['i32']);
+ 'store16'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicStore'](module, 2, offset, ptr, value, Module['i32'], strToStack(name));
},
'rmw': {
- 'add'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWAdd'], 4, offset, ptr, value, Module['i32']);
+ 'add'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWAdd'], 4, offset, ptr, value, Module['i32'], strToStack(name));
},
- 'sub'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWSub'], 4, offset, ptr, value, Module['i32']);
+ 'sub'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWSub'], 4, offset, ptr, value, Module['i32'], strToStack(name));
},
- 'and'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWAnd'], 4, offset, ptr, value, Module['i32']);
+ 'and'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWAnd'], 4, offset, ptr, value, Module['i32'], strToStack(name));
},
- 'or'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWOr'], 4, offset, ptr, value, Module['i32']);
+ 'or'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWOr'], 4, offset, ptr, value, Module['i32'], strToStack(name));
},
- 'xor'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWXor'], 4, offset, ptr, value, Module['i32']);
+ 'xor'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWXor'], 4, offset, ptr, value, Module['i32'], strToStack(name));
},
- 'xchg'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWXchg'], 4, offset, ptr, value, Module['i32']);
+ 'xchg'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWXchg'], 4, offset, ptr, value, Module['i32'], strToStack(name));
},
- 'cmpxchg'(offset, ptr, expected, replacement) {
- return Module['_BinaryenAtomicCmpxchg'](module, 4, offset, ptr, expected, replacement, Module['i32'])
+ 'cmpxchg'(offset, ptr, expected, replacement, name) {
+ return Module['_BinaryenAtomicCmpxchg'](module, 4, offset, ptr, expected, replacement, Module['i32'], strToStack(name))
},
},
'rmw8_u': {
- 'add'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWAdd'], 1, offset, ptr, value, Module['i32']);
+ 'add'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWAdd'], 1, offset, ptr, value, Module['i32'], strToStack(name));
},
- 'sub'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWSub'], 1, offset, ptr, value, Module['i32']);
+ 'sub'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWSub'], 1, offset, ptr, value, Module['i32'], strToStack(name));
},
- 'and'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWAnd'], 1, offset, ptr, value, Module['i32']);
+ 'and'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWAnd'], 1, offset, ptr, value, Module['i32'], strToStack(name));
},
- 'or'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWOr'], 1, offset, ptr, value, Module['i32']);
+ 'or'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWOr'], 1, offset, ptr, value, Module['i32'], strToStack(name));
},
- 'xor'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWXor'], 1, offset, ptr, value, Module['i32']);
+ 'xor'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWXor'], 1, offset, ptr, value, Module['i32'], strToStack(name));
},
- 'xchg'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWXchg'], 1, offset, ptr, value, Module['i32']);
+ 'xchg'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWXchg'], 1, offset, ptr, value, Module['i32'], strToStack(name));
},
- 'cmpxchg'(offset, ptr, expected, replacement) {
- return Module['_BinaryenAtomicCmpxchg'](module, 1, offset, ptr, expected, replacement, Module['i32'])
+ 'cmpxchg'(offset, ptr, expected, replacement, name) {
+ return Module['_BinaryenAtomicCmpxchg'](module, 1, offset, ptr, expected, replacement, Module['i32'], strToStack(name))
},
},
'rmw16_u': {
- 'add'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWAdd'], 2, offset, ptr, value, Module['i32']);
+ 'add'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWAdd'], 2, offset, ptr, value, Module['i32'], strToStack(name));
},
- 'sub'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWSub'], 2, offset, ptr, value, Module['i32']);
+ 'sub'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWSub'], 2, offset, ptr, value, Module['i32'], strToStack(name));
},
- 'and'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWAnd'], 2, offset, ptr, value, Module['i32']);
+ 'and'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWAnd'], 2, offset, ptr, value, Module['i32'], strToStack(name));
},
- 'or'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWOr'], 2, offset, ptr, value, Module['i32']);
+ 'or'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWOr'], 2, offset, ptr, value, Module['i32'], strToStack(name));
},
- 'xor'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWXor'], 2, offset, ptr, value, Module['i32']);
+ 'xor'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWXor'], 2, offset, ptr, value, Module['i32'], strToStack(name));
},
- 'xchg'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWXchg'], 2, offset, ptr, value, Module['i32']);
+ 'xchg'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWXchg'], 2, offset, ptr, value, Module['i32'], strToStack(name));
},
- 'cmpxchg'(offset, ptr, expected, replacement) {
- return Module['_BinaryenAtomicCmpxchg'](module, 2, offset, ptr, expected, replacement, Module['i32'])
+ 'cmpxchg'(offset, ptr, expected, replacement, name) {
+ return Module['_BinaryenAtomicCmpxchg'](module, 2, offset, ptr, expected, replacement, Module['i32'], strToStack(name))
},
},
},
@@ -979,38 +979,38 @@ function wrapModule(module, self = {}) {
};
self['i64'] = {
- 'load'(offset, align, ptr) {
- return Module['_BinaryenLoad'](module, 8, true, offset, align, Module['i64'], ptr);
+ 'load'(offset, align, ptr, name) {
+ return Module['_BinaryenLoad'](module, 8, true, offset, align, Module['i64'], ptr, strToStack(name));
},
- 'load8_s'(offset, align, ptr) {
- return Module['_BinaryenLoad'](module, 1, true, offset, align, Module['i64'], ptr);
+ 'load8_s'(offset, align, ptr, name) {
+ return Module['_BinaryenLoad'](module, 1, true, offset, align, Module['i64'], ptr, strToStack(name));
},
- 'load8_u'(offset, align, ptr) {
- return Module['_BinaryenLoad'](module, 1, false, offset, align, Module['i64'], ptr);
+ 'load8_u'(offset, align, ptr, name) {
+ return Module['_BinaryenLoad'](module, 1, false, offset, align, Module['i64'], ptr, strToStack(name));
},
- 'load16_s'(offset, align, ptr) {
- return Module['_BinaryenLoad'](module, 2, true, offset, align, Module['i64'], ptr);
+ 'load16_s'(offset, align, ptr, name) {
+ return Module['_BinaryenLoad'](module, 2, true, offset, align, Module['i64'], ptr, strToStack(name));
},
- 'load16_u'(offset, align, ptr) {
- return Module['_BinaryenLoad'](module, 2, false, offset, align, Module['i64'], ptr);
+ 'load16_u'(offset, align, ptr, name) {
+ return Module['_BinaryenLoad'](module, 2, false, offset, align, Module['i64'], ptr, strToStack(name));
},
- 'load32_s'(offset, align, ptr) {
- return Module['_BinaryenLoad'](module, 4, true, offset, align, Module['i64'], ptr);
+ 'load32_s'(offset, align, ptr, name) {
+ return Module['_BinaryenLoad'](module, 4, true, offset, align, Module['i64'], ptr, strToStack(name));
},
- 'load32_u'(offset, align, ptr) {
- return Module['_BinaryenLoad'](module, 4, false, offset, align, Module['i64'], ptr);
+ 'load32_u'(offset, align, ptr, name) {
+ return Module['_BinaryenLoad'](module, 4, false, offset, align, Module['i64'], ptr, strToStack(name));
},
- 'store'(offset, align, ptr, value) {
- return Module['_BinaryenStore'](module, 8, offset, align, ptr, value, Module['i64']);
+ 'store'(offset, align, ptr, value, name) {
+ return Module['_BinaryenStore'](module, 8, offset, align, ptr, value, Module['i64'], strToStack(name));
},
- 'store8'(offset, align, ptr, value) {
- return Module['_BinaryenStore'](module, 1, offset, align, ptr, value, Module['i64']);
+ 'store8'(offset, align, ptr, value, name) {
+ return Module['_BinaryenStore'](module, 1, offset, align, ptr, value, Module['i64'], strToStack(name));
},
- 'store16'(offset, align, ptr, value) {
- return Module['_BinaryenStore'](module, 2, offset, align, ptr, value, Module['i64']);
+ 'store16'(offset, align, ptr, value, name) {
+ return Module['_BinaryenStore'](module, 2, offset, align, ptr, value, Module['i64'], strToStack(name));
},
- 'store32'(offset, align, ptr, value) {
- return Module['_BinaryenStore'](module, 4, offset, align, ptr, value, Module['i64']);
+ 'store32'(offset, align, ptr, value, name) {
+ return Module['_BinaryenStore'](module, 4, offset, align, ptr, value, Module['i64'], strToStack(name));
},
'const'(x, y) {
return preserveStack(() => {
@@ -1157,120 +1157,120 @@ function wrapModule(module, self = {}) {
return Module['_BinaryenBinary'](module, Module['GeUInt64'], left, right);
},
'atomic': {
- 'load'(offset, ptr) {
- return Module['_BinaryenAtomicLoad'](module, 8, offset, Module['i64'], ptr);
+ 'load'(offset, ptr, name) {
+ return Module['_BinaryenAtomicLoad'](module, 8, offset, Module['i64'], ptr, strToStack(name));
},
- 'load8_u'(offset, ptr) {
- return Module['_BinaryenAtomicLoad'](module, 1, offset, Module['i64'], ptr);
+ 'load8_u'(offset, ptr, name) {
+ return Module['_BinaryenAtomicLoad'](module, 1, offset, Module['i64'], ptr, strToStack(name));
},
- 'load16_u'(offset, ptr) {
- return Module['_BinaryenAtomicLoad'](module, 2, offset, Module['i64'], ptr);
+ 'load16_u'(offset, ptr, name) {
+ return Module['_BinaryenAtomicLoad'](module, 2, offset, Module['i64'], ptr, strToStack(name));
},
- 'load32_u'(offset, ptr) {
- return Module['_BinaryenAtomicLoad'](module, 4, offset, Module['i64'], ptr);
+ 'load32_u'(offset, ptr, name) {
+ return Module['_BinaryenAtomicLoad'](module, 4, offset, Module['i64'], ptr, strToStack(name));
},
- 'store'(offset, ptr, value) {
- return Module['_BinaryenAtomicStore'](module, 8, offset, ptr, value, Module['i64']);
+ 'store'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicStore'](module, 8, offset, ptr, value, Module['i64'], strToStack(name));
},
- 'store8'(offset, ptr, value) {
- return Module['_BinaryenAtomicStore'](module, 1, offset, ptr, value, Module['i64']);
+ 'store8'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicStore'](module, 1, offset, ptr, value, Module['i64'], strToStack(name));
},
- 'store16'(offset, ptr, value) {
- return Module['_BinaryenAtomicStore'](module, 2, offset, ptr, value, Module['i64']);
+ 'store16'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicStore'](module, 2, offset, ptr, value, Module['i64'], strToStack(name));
},
- 'store32'(offset, ptr, value) {
- return Module['_BinaryenAtomicStore'](module, 4, offset, ptr, value, Module['i64']);
+ 'store32'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicStore'](module, 4, offset, ptr, value, Module['i64'], strToStack(name));
},
'rmw': {
- 'add'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWAdd'], 8, offset, ptr, value, Module['i64']);
+ 'add'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWAdd'], 8, offset, ptr, value, Module['i64'], strToStack(name));
},
- 'sub'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWSub'], 8, offset, ptr, value, Module['i64']);
+ 'sub'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWSub'], 8, offset, ptr, value, Module['i64'], strToStack(name));
},
- 'and'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWAnd'], 8, offset, ptr, value, Module['i64']);
+ 'and'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWAnd'], 8, offset, ptr, value, Module['i64'], strToStack(name));
},
- 'or'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWOr'], 8, offset, ptr, value, Module['i64']);
+ 'or'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWOr'], 8, offset, ptr, value, Module['i64'], strToStack(name));
},
- 'xor'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWXor'], 8, offset, ptr, value, Module['i64']);
+ 'xor'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWXor'], 8, offset, ptr, value, Module['i64'], strToStack(name));
},
- 'xchg'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWXchg'], 8, offset, ptr, value, Module['i64']);
+ 'xchg'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWXchg'], 8, offset, ptr, value, Module['i64'], strToStack(name));
},
- 'cmpxchg'(offset, ptr, expected, replacement) {
- return Module['_BinaryenAtomicCmpxchg'](module, 8, offset, ptr, expected, replacement, Module['i64'])
+ 'cmpxchg'(offset, ptr, expected, replacement, name) {
+ return Module['_BinaryenAtomicCmpxchg'](module, 8, offset, ptr, expected, replacement, Module['i64'], strToStack(name))
},
},
'rmw8_u': {
- 'add'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWAdd'], 1, offset, ptr, value, Module['i64']);
+ 'add'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWAdd'], 1, offset, ptr, value, Module['i64'], strToStack(name));
},
- 'sub'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWSub'], 1, offset, ptr, value, Module['i64']);
+ 'sub'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWSub'], 1, offset, ptr, value, Module['i64'], strToStack(name));
},
- 'and'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWAnd'], 1, offset, ptr, value, Module['i64']);
+ 'and'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWAnd'], 1, offset, ptr, value, Module['i64'], strToStack(name));
},
- 'or'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWOr'], 1, offset, ptr, value, Module['i64']);
+ 'or'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWOr'], 1, offset, ptr, value, Module['i64'], strToStack(name));
},
- 'xor'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWXor'], 1, offset, ptr, value, Module['i64']);
+ 'xor'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWXor'], 1, offset, ptr, value, Module['i64'], strToStack(name));
},
- 'xchg'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWXchg'], 1, offset, ptr, value, Module['i64']);
+ 'xchg'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWXchg'], 1, offset, ptr, value, Module['i64'], strToStack(name));
},
- 'cmpxchg'(offset, ptr, expected, replacement) {
- return Module['_BinaryenAtomicCmpxchg'](module, 1, offset, ptr, expected, replacement, Module['i64'])
+ 'cmpxchg'(offset, ptr, expected, replacement, name) {
+ return Module['_BinaryenAtomicCmpxchg'](module, 1, offset, ptr, expected, replacement, Module['i64'], strToStack(name))
},
},
'rmw16_u': {
- 'add'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWAdd'], 2, offset, ptr, value, Module['i64']);
+ 'add'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWAdd'], 2, offset, ptr, value, Module['i64'], strToStack(name));
},
- 'sub'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWSub'], 2, offset, ptr, value, Module['i64']);
+ 'sub'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWSub'], 2, offset, ptr, value, Module['i64'], strToStack(name));
},
- 'and'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWAnd'], 2, offset, ptr, value, Module['i64']);
+ 'and'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWAnd'], 2, offset, ptr, value, Module['i64'], strToStack(name));
},
- 'or'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWOr'], 2, offset, ptr, value, Module['i64']);
+ 'or'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWOr'], 2, offset, ptr, value, Module['i64'], strToStack(name));
},
- 'xor'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWXor'], 2, offset, ptr, value, Module['i64']);
+ 'xor'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWXor'], 2, offset, ptr, value, Module['i64'], strToStack(name));
},
- 'xchg'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWXchg'], 2, offset, ptr, value, Module['i64']);
+ 'xchg'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWXchg'], 2, offset, ptr, value, Module['i64'], strToStack(name));
},
- 'cmpxchg'(offset, ptr, expected, replacement) {
- return Module['_BinaryenAtomicCmpxchg'](module, 2, offset, ptr, expected, replacement, Module['i64'])
+ 'cmpxchg'(offset, ptr, expected, replacement, name) {
+ return Module['_BinaryenAtomicCmpxchg'](module, 2, offset, ptr, expected, replacement, Module['i64'], strToStack(name))
},
},
'rmw32_u': {
- 'add'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWAdd'], 4, offset, ptr, value, Module['i64']);
+ 'add'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWAdd'], 4, offset, ptr, value, Module['i64'], strToStack(name));
},
- 'sub'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWSub'], 4, offset, ptr, value, Module['i64']);
+ 'sub'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWSub'], 4, offset, ptr, value, Module['i64'], strToStack(name));
},
- 'and'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWAnd'], 4, offset, ptr, value, Module['i64']);
+ 'and'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWAnd'], 4, offset, ptr, value, Module['i64'], strToStack(name));
},
- 'or'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWOr'], 4, offset, ptr, value, Module['i64']);
+ 'or'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWOr'], 4, offset, ptr, value, Module['i64'], strToStack(name));
},
- 'xor'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWXor'], 4, offset, ptr, value, Module['i64']);
+ 'xor'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWXor'], 4, offset, ptr, value, Module['i64'], strToStack(name));
},
- 'xchg'(offset, ptr, value) {
- return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWXchg'], 4, offset, ptr, value, Module['i64']);
+ 'xchg'(offset, ptr, value, name) {
+ return Module['_BinaryenAtomicRMW'](module, Module['AtomicRMWXchg'], 4, offset, ptr, value, Module['i64'], strToStack(name));
},
- 'cmpxchg'(offset, ptr, expected, replacement) {
- return Module['_BinaryenAtomicCmpxchg'](module, 4, offset, ptr, expected, replacement, Module['i64'])
+ 'cmpxchg'(offset, ptr, expected, replacement, name) {
+ return Module['_BinaryenAtomicCmpxchg'](module, 4, offset, ptr, expected, replacement, Module['i64'], strToStack(name))
},
},
},
@@ -1280,11 +1280,11 @@ function wrapModule(module, self = {}) {
};
self['f32'] = {
- 'load'(offset, align, ptr) {
- return Module['_BinaryenLoad'](module, 4, true, offset, align, Module['f32'], ptr);
+ 'load'(offset, align, ptr, name) {
+ return Module['_BinaryenLoad'](module, 4, true, offset, align, Module['f32'], ptr, strToStack(name));
},
- 'store'(offset, align, ptr, value) {
- return Module['_BinaryenStore'](module, 4, offset, align, ptr, value, Module['f32']);
+ 'store'(offset, align, ptr, value, name) {
+ return Module['_BinaryenStore'](module, 4, offset, align, ptr, value, Module['f32'], strToStack(name));
},
'const'(x) {
return preserveStack(() => {
@@ -1388,11 +1388,11 @@ function wrapModule(module, self = {}) {
};
self['f64'] = {
- 'load'(offset, align, ptr) {
- return Module['_BinaryenLoad'](module, 8, true, offset, align, Module['f64'], ptr);
+ 'load'(offset, align, ptr, name) {
+ return Module['_BinaryenLoad'](module, 8, true, offset, align, Module['f64'], ptr, strToStack(name));
},
- 'store'(offset, align, ptr, value) {
- return Module['_BinaryenStore'](module, 8, offset, align, ptr, value, Module['f64']);
+ 'store'(offset, align, ptr, value, name) {
+ return Module['_BinaryenStore'](module, 8, offset, align, ptr, value, Module['f64'], strToStack(name));
},
'const'(x) {
return preserveStack(() => {
@@ -1496,71 +1496,71 @@ function wrapModule(module, self = {}) {
};
self['v128'] = {
- 'load'(offset, align, ptr) {
- return Module['_BinaryenLoad'](module, 16, false, offset, align, Module['v128'], ptr);
+ 'load'(offset, align, ptr, name) {
+ return Module['_BinaryenLoad'](module, 16, false, offset, align, Module['v128'], ptr, strToStack(name));
},
- 'load8_splat'(offset, align, ptr) {
- return Module['_BinaryenSIMDLoad'](module, Module['Load8SplatVec128'], offset, align, ptr);
+ 'load8_splat'(offset, align, ptr, name) {
+ return Module['_BinaryenSIMDLoad'](module, Module['Load8SplatVec128'], offset, align, ptr, strToStack(name));
},
- 'load16_splat'(offset, align, ptr) {
- return Module['_BinaryenSIMDLoad'](module, Module['Load16SplatVec128'], offset, align, ptr);
+ 'load16_splat'(offset, align, ptr, name) {
+ return Module['_BinaryenSIMDLoad'](module, Module['Load16SplatVec128'], offset, align, ptr, strToStack(name));
},
- 'load32_splat'(offset, align, ptr) {
- return Module['_BinaryenSIMDLoad'](module, Module['Load32SplatVec128'], offset, align, ptr);
+ 'load32_splat'(offset, align, ptr, name) {
+ return Module['_BinaryenSIMDLoad'](module, Module['Load32SplatVec128'], offset, align, ptr, strToStack(name));
},
- 'load64_splat'(offset, align, ptr) {
- return Module['_BinaryenSIMDLoad'](module, Module['Load64SplatVec128'], offset, align, ptr);
+ 'load64_splat'(offset, align, ptr, name) {
+ return Module['_BinaryenSIMDLoad'](module, Module['Load64SplatVec128'], offset, align, ptr, strToStack(name));
},
- 'load8x8_s'(offset, align, ptr) {
- return Module['_BinaryenSIMDLoad'](module, Module['Load8x8SVec128'], offset, align, ptr);
+ 'load8x8_s'(offset, align, ptr, name) {
+ return Module['_BinaryenSIMDLoad'](module, Module['Load8x8SVec128'], offset, align, ptr, strToStack(name));
},
- 'load8x8_u'(offset, align, ptr) {
- return Module['_BinaryenSIMDLoad'](module, Module['Load8x8UVec128'], offset, align, ptr);
+ 'load8x8_u'(offset, align, ptr, name) {
+ return Module['_BinaryenSIMDLoad'](module, Module['Load8x8UVec128'], offset, align, ptr, strToStack(name));
},
- 'load16x4_s'(offset, align, ptr) {
- return Module['_BinaryenSIMDLoad'](module, Module['Load16x4SVec128'], offset, align, ptr);
+ 'load16x4_s'(offset, align, ptr, name) {
+ return Module['_BinaryenSIMDLoad'](module, Module['Load16x4SVec128'], offset, align, ptr, strToStack(name));
},
- 'load16x4_u'(offset, align, ptr) {
- return Module['_BinaryenSIMDLoad'](module, Module['Load16x4UVec128'], offset, align, ptr);
+ 'load16x4_u'(offset, align, ptr, name) {
+ return Module['_BinaryenSIMDLoad'](module, Module['Load16x4UVec128'], offset, align, ptr, strToStack(name));
},
- 'load32x2_s'(offset, align, ptr) {
- return Module['_BinaryenSIMDLoad'](module, Module['Load32x2SVec128'], offset, align, ptr);
+ 'load32x2_s'(offset, align, ptr, name) {
+ return Module['_BinaryenSIMDLoad'](module, Module['Load32x2SVec128'], offset, align, ptr, strToStack(name));
},
- 'load32x2_u'(offset, align, ptr) {
- return Module['_BinaryenSIMDLoad'](module, Module['Load32x2UVec128'], offset, align, ptr);
+ 'load32x2_u'(offset, align, ptr, name) {
+ return Module['_BinaryenSIMDLoad'](module, Module['Load32x2UVec128'], offset, align, ptr, strToStack(name));
},
- 'load32_zero'(offset, align, ptr) {
- return Module['_BinaryenSIMDLoad'](module, Module['Load32ZeroVec128'], offset, align, ptr);
+ 'load32_zero'(offset, align, ptr, name) {
+ return Module['_BinaryenSIMDLoad'](module, Module['Load32ZeroVec128'], offset, align, ptr, strToStack(name));
},
- 'load64_zero'(offset, align, ptr) {
- return Module['_BinaryenSIMDLoad'](module, Module['Load64ZeroVec128'], offset, align, ptr);
+ 'load64_zero'(offset, align, ptr, name) {
+ return Module['_BinaryenSIMDLoad'](module, Module['Load64ZeroVec128'], offset, align, ptr, strToStack(name));
},
- 'load8_lane'(offset, align, index, ptr, vec) {
- return Module['_BinaryenSIMDLoadStoreLane'](module, Module['Load8LaneVec128'], offset, align, index, ptr, vec);
+ 'load8_lane'(offset, align, index, ptr, vec, name) {
+ return Module['_BinaryenSIMDLoadStoreLane'](module, Module['Load8LaneVec128'], offset, align, index, ptr, vec, strToStack(name));
},
- 'load16_lane'(offset, align, index, ptr, vec) {
- return Module['_BinaryenSIMDLoadStoreLane'](module, Module['Load16LaneVec128'], offset, align, index, ptr, vec);
+ 'load16_lane'(offset, align, index, ptr, vec, name) {
+ return Module['_BinaryenSIMDLoadStoreLane'](module, Module['Load16LaneVec128'], offset, align, index, ptr, vec, strToStack(name));
},
- 'load32_lane'(offset, align, index, ptr, vec) {
- return Module['_BinaryenSIMDLoadStoreLane'](module, Module['Load32LaneVec128'], offset, align, index, ptr, vec);
+ 'load32_lane'(offset, align, index, ptr, vec, name) {
+ return Module['_BinaryenSIMDLoadStoreLane'](module, Module['Load32LaneVec128'], offset, align, index, ptr, vec, strToStack(name));
},
- 'load64_lane'(offset, align, index, ptr, vec) {
- return Module['_BinaryenSIMDLoadStoreLane'](module, Module['Load64LaneVec128'], offset, align, index, ptr, vec);
+ 'load64_lane'(offset, align, index, ptr, vec, name) {
+ return Module['_BinaryenSIMDLoadStoreLane'](module, Module['Load64LaneVec128'], offset, align, index, ptr, vec, strToStack(name));
},
- 'store8_lane'(offset, align, index, ptr, vec) {
- return Module['_BinaryenSIMDLoadStoreLane'](module, Module['Store8LaneVec128'], offset, align, index, ptr, vec);
+ 'store8_lane'(offset, align, index, ptr, vec, name) {
+ return Module['_BinaryenSIMDLoadStoreLane'](module, Module['Store8LaneVec128'], offset, align, index, ptr, vec, strToStack(name));
},
- 'store16_lane'(offset, align, index, ptr, vec) {
- return Module['_BinaryenSIMDLoadStoreLane'](module, Module['Store16LaneVec128'], offset, align, index, ptr, vec);
+ 'store16_lane'(offset, align, index, ptr, vec, name) {
+ return Module['_BinaryenSIMDLoadStoreLane'](module, Module['Store16LaneVec128'], offset, align, index, ptr, vec, strToStack(name));
},
- 'store32_lane'(offset, align, index, ptr, vec) {
- return Module['_BinaryenSIMDLoadStoreLane'](module, Module['Store32LaneVec128'], offset, align, index, ptr, vec);
+ 'store32_lane'(offset, align, index, ptr, vec, name) {
+ return Module['_BinaryenSIMDLoadStoreLane'](module, Module['Store32LaneVec128'], offset, align, index, ptr, vec, strToStack(name));
},
- 'store64_lane'(offset, align, index, ptr, vec) {
- return Module['_BinaryenSIMDLoadStoreLane'](module, Module['Store64LaneVec128'], offset, align, index, ptr, vec);
+ 'store64_lane'(offset, align, index, ptr, vec, name) {
+ return Module['_BinaryenSIMDLoadStoreLane'](module, Module['Store64LaneVec128'], offset, align, index, ptr, vec, strToStack(name));
},
- 'store'(offset, align, ptr, value) {
- return Module['_BinaryenStore'](module, 16, offset, align, ptr, value, Module['v128']);
+ 'store'(offset, align, ptr, value, name) {
+ return Module['_BinaryenStore'](module, 16, offset, align, ptr, value, Module['v128'], strToStack(name));
},
'const'(i8s) {
return preserveStack(() => {
@@ -2501,7 +2501,7 @@ function wrapModule(module, self = {}) {
self['removeExport'] = function(externalName) {
return preserveStack(() => Module['_BinaryenRemoveExport'](module, strToStack(externalName)));
};
- self['setMemory'] = function(initial, maximum, exportName, segments = [], shared = false) {
+ self['setMemory'] = function(initial, maximum, exportName, segments = [], shared = false, internalName) {
// segments are assumed to be { passive: bool, offset: expression ref, data: array of 8-bit data }
return preserveStack(() => {
const segmentsLen = segments.length;
@@ -2524,22 +2524,23 @@ function wrapModule(module, self = {}) {
i32sToStack(segmentOffset),
i32sToStack(segmentDataLen),
segmentsLen,
- shared
+ shared,
+ strToStack(internalName)
);
});
};
self['hasMemory'] = function() {
return Boolean(Module['_BinaryenHasMemory'](module));
};
- self['getMemoryInfo'] = function() {
+ self['getMemoryInfo'] = function(name) {
var memoryInfo = {
- 'module': UTF8ToString(Module['_BinaryenMemoryImportGetModule'](module)),
- 'base': UTF8ToString(Module['_BinaryenMemoryImportGetBase'](module)),
- 'initial': Module['_BinaryenMemoryGetInitial'](module),
- 'shared': Boolean(Module['_BinaryenMemoryIsShared'](module))
+ 'module': UTF8ToString(Module['_BinaryenMemoryImportGetModule'](module, strToStack(name))),
+ 'base': UTF8ToString(Module['_BinaryenMemoryImportGetBase'](module, strToStack(name))),
+ 'initial': Module['_BinaryenMemoryGetInitial'](module, strToStack(name)),
+ 'shared': Boolean(Module['_BinaryenMemoryIsShared'](module, strToStack(name)))
};
- if (Module['_BinaryenMemoryHasMax'](module)) {
- memoryInfo['max'] = Module['_BinaryenMemoryGetMax'](module);
+ if (Module['_BinaryenMemoryHasMax'](module, strToStack(name))) {
+ memoryInfo['max'] = Module['_BinaryenMemoryGetMax'](module, strToStack(name));
}
return memoryInfo;
};
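
Note on the JS wrappers above: each load/store helper now takes an optional trailing memory name and forwards it through strToStack to the C API. A minimal sketch of the equivalent call made directly against the updated C API from C++; the memory name "0" is only an illustrative placeholder, not something this patch defines:

    #include "binaryen-c.h"

    // Hedged sketch: BinaryenLoad now takes the target memory's name as its
    // final argument (bytes, signed, offset, align, type, ptr, memory name).
    BinaryenExpressionRef makeExampleLoad(BinaryenModuleRef module) {
      BinaryenExpressionRef ptr =
          BinaryenConst(module, BinaryenLiteralInt32(8));
      return BinaryenLoad(module, 4, 1, /*offset=*/0, /*align=*/0,
                          BinaryenTypeInt32(), ptr, "0");
    }
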
diff --git a/src/passes/AlignmentLowering.cpp b/src/passes/AlignmentLowering.cpp
index 8cab58072..d0ceeb610 100644
--- a/src/passes/AlignmentLowering.cpp
+++ b/src/passes/AlignmentLowering.cpp
@@ -34,7 +34,8 @@ struct AlignmentLowering : public WalkerPass<PostWalker<AlignmentLowering>> {
if (curr->align == 0 || curr->align == curr->bytes) {
return curr;
}
- auto indexType = getModule()->memory.indexType;
+ auto mem = getModule()->getMemory(curr->memory);
+ auto indexType = mem->indexType;
Builder builder(*getModule());
assert(curr->type == Type::i32);
auto temp = builder.addVar(getFunction(), indexType);
@@ -47,7 +48,8 @@ struct AlignmentLowering : public WalkerPass<PostWalker<AlignmentLowering>> {
curr->offset,
1,
builder.makeLocalGet(temp, indexType),
- Type::i32),
+ Type::i32,
+ curr->memory),
builder.makeBinary(
ShlInt32,
builder.makeLoad(1,
@@ -55,7 +57,8 @@ struct AlignmentLowering : public WalkerPass<PostWalker<AlignmentLowering>> {
curr->offset + 1,
1,
builder.makeLocalGet(temp, indexType),
- Type::i32),
+ Type::i32,
+ curr->memory),
builder.makeConst(int32_t(8))));
if (curr->signed_) {
ret = Bits::makeSignExt(ret, 2, *getModule());
@@ -71,7 +74,8 @@ struct AlignmentLowering : public WalkerPass<PostWalker<AlignmentLowering>> {
curr->offset,
1,
builder.makeLocalGet(temp, indexType),
- Type::i32),
+ Type::i32,
+ curr->memory),
builder.makeBinary(
ShlInt32,
builder.makeLoad(1,
@@ -79,7 +83,8 @@ struct AlignmentLowering : public WalkerPass<PostWalker<AlignmentLowering>> {
curr->offset + 1,
1,
builder.makeLocalGet(temp, indexType),
- Type::i32),
+ Type::i32,
+ curr->memory),
builder.makeConst(int32_t(8)))),
builder.makeBinary(
OrInt32,
@@ -90,7 +95,8 @@ struct AlignmentLowering : public WalkerPass<PostWalker<AlignmentLowering>> {
curr->offset + 2,
1,
builder.makeLocalGet(temp, indexType),
- Type::i32),
+ Type::i32,
+ curr->memory),
builder.makeConst(int32_t(16))),
builder.makeBinary(
ShlInt32,
@@ -99,7 +105,8 @@ struct AlignmentLowering : public WalkerPass<PostWalker<AlignmentLowering>> {
curr->offset + 3,
1,
builder.makeLocalGet(temp, indexType),
- Type::i32),
+ Type::i32,
+ curr->memory),
builder.makeConst(int32_t(24)))));
} else if (curr->align == 2) {
ret = builder.makeBinary(
@@ -109,7 +116,8 @@ struct AlignmentLowering : public WalkerPass<PostWalker<AlignmentLowering>> {
curr->offset,
2,
builder.makeLocalGet(temp, indexType),
- Type::i32),
+ Type::i32,
+ curr->memory),
builder.makeBinary(
ShlInt32,
builder.makeLoad(2,
@@ -117,7 +125,8 @@ struct AlignmentLowering : public WalkerPass<PostWalker<AlignmentLowering>> {
curr->offset + 2,
2,
builder.makeLocalGet(temp, indexType),
- Type::i32),
+ Type::i32,
+ curr->memory),
builder.makeConst(int32_t(16))));
} else {
WASM_UNREACHABLE("invalid alignment");
@@ -135,7 +144,8 @@ struct AlignmentLowering : public WalkerPass<PostWalker<AlignmentLowering>> {
}
Builder builder(*getModule());
assert(curr->value->type == Type::i32);
- auto indexType = getModule()->memory.indexType;
+ auto mem = getModule()->getMemory(curr->memory);
+ auto indexType = mem->indexType;
auto tempPtr = builder.addVar(getFunction(), indexType);
auto tempValue = builder.addVar(getFunction(), Type::i32);
auto* block =
@@ -148,7 +158,8 @@ struct AlignmentLowering : public WalkerPass<PostWalker<AlignmentLowering>> {
1,
builder.makeLocalGet(tempPtr, indexType),
builder.makeLocalGet(tempValue, Type::i32),
- Type::i32));
+ Type::i32,
+ curr->memory));
block->list.push_back(builder.makeStore(
1,
curr->offset + 1,
@@ -157,7 +168,8 @@ struct AlignmentLowering : public WalkerPass<PostWalker<AlignmentLowering>> {
builder.makeBinary(ShrUInt32,
builder.makeLocalGet(tempValue, Type::i32),
builder.makeConst(int32_t(8))),
- Type::i32));
+ Type::i32,
+ curr->memory));
} else if (curr->bytes == 4) {
if (curr->align == 1) {
block->list.push_back(
@@ -166,7 +178,8 @@ struct AlignmentLowering : public WalkerPass<PostWalker<AlignmentLowering>> {
1,
builder.makeLocalGet(tempPtr, indexType),
builder.makeLocalGet(tempValue, Type::i32),
- Type::i32));
+ Type::i32,
+ curr->memory));
block->list.push_back(builder.makeStore(
1,
curr->offset + 1,
@@ -175,7 +188,8 @@ struct AlignmentLowering : public WalkerPass<PostWalker<AlignmentLowering>> {
builder.makeBinary(ShrUInt32,
builder.makeLocalGet(tempValue, Type::i32),
builder.makeConst(int32_t(8))),
- Type::i32));
+ Type::i32,
+ curr->memory));
block->list.push_back(builder.makeStore(
1,
curr->offset + 2,
@@ -184,7 +198,8 @@ struct AlignmentLowering : public WalkerPass<PostWalker<AlignmentLowering>> {
builder.makeBinary(ShrUInt32,
builder.makeLocalGet(tempValue, Type::i32),
builder.makeConst(int32_t(16))),
- Type::i32));
+ Type::i32,
+ curr->memory));
block->list.push_back(builder.makeStore(
1,
curr->offset + 3,
@@ -193,7 +208,8 @@ struct AlignmentLowering : public WalkerPass<PostWalker<AlignmentLowering>> {
builder.makeBinary(ShrUInt32,
builder.makeLocalGet(tempValue, Type::i32),
builder.makeConst(int32_t(24))),
- Type::i32));
+ Type::i32,
+ curr->memory));
} else if (curr->align == 2) {
block->list.push_back(
builder.makeStore(2,
@@ -201,7 +217,8 @@ struct AlignmentLowering : public WalkerPass<PostWalker<AlignmentLowering>> {
2,
builder.makeLocalGet(tempPtr, indexType),
builder.makeLocalGet(tempValue, Type::i32),
- Type::i32));
+ Type::i32,
+ curr->memory));
block->list.push_back(builder.makeStore(
2,
curr->offset + 2,
@@ -210,7 +227,8 @@ struct AlignmentLowering : public WalkerPass<PostWalker<AlignmentLowering>> {
builder.makeBinary(ShrUInt32,
builder.makeLocalGet(tempValue, Type::i32),
builder.makeConst(int32_t(16))),
- Type::i32));
+ Type::i32,
+ curr->memory));
} else {
WASM_UNREACHABLE("invalid alignment");
}
@@ -256,7 +274,8 @@ struct AlignmentLowering : public WalkerPass<PostWalker<AlignmentLowering>> {
break;
}
// Load two 32-bit pieces, and combine them.
- auto indexType = getModule()->memory.indexType;
+ auto mem = getModule()->getMemory(curr->memory);
+ auto indexType = mem->indexType;
auto temp = builder.addVar(getFunction(), indexType);
auto* set = builder.makeLocalSet(temp, curr->ptr);
Expression* low =
@@ -265,7 +284,8 @@ struct AlignmentLowering : public WalkerPass<PostWalker<AlignmentLowering>> {
curr->offset,
curr->align,
builder.makeLocalGet(temp, indexType),
- Type::i32));
+ Type::i32,
+ curr->memory));
low = builder.makeUnary(ExtendUInt32, low);
// Note that the alignment is assumed to be the same here, even though
// we add an offset of 4. That is because this is an unaligned load, so
@@ -277,7 +297,8 @@ struct AlignmentLowering : public WalkerPass<PostWalker<AlignmentLowering>> {
curr->offset + 4,
curr->align,
builder.makeLocalGet(temp, indexType),
- Type::i32));
+ Type::i32,
+ curr->memory));
high = builder.makeUnary(ExtendUInt32, high);
high =
builder.makeBinary(ShlInt64, high, builder.makeConst(int64_t(32)));
@@ -335,7 +356,8 @@ struct AlignmentLowering : public WalkerPass<PostWalker<AlignmentLowering>> {
value = builder.makeUnary(ReinterpretFloat64, value);
}
// Store as two 32-bit pieces.
- auto indexType = getModule()->memory.indexType;
+ auto mem = getModule()->getMemory(curr->memory);
+ auto indexType = mem->indexType;
auto tempPtr = builder.addVar(getFunction(), indexType);
auto* setPtr = builder.makeLocalSet(tempPtr, curr->ptr);
auto tempValue = builder.addVar(getFunction(), Type::i64);
@@ -348,7 +370,8 @@ struct AlignmentLowering : public WalkerPass<PostWalker<AlignmentLowering>> {
curr->align,
builder.makeLocalGet(tempPtr, indexType),
low,
- Type::i32));
+ Type::i32,
+ curr->memory));
Expression* high =
builder.makeBinary(ShrUInt64,
builder.makeLocalGet(tempValue, Type::i64),
@@ -364,7 +387,8 @@ struct AlignmentLowering : public WalkerPass<PostWalker<AlignmentLowering>> {
curr->align,
builder.makeLocalGet(tempPtr, indexType),
high,
- Type::i32));
+ Type::i32,
+ curr->memory));
replacement = builder.makeBlock({setPtr, setValue, low, high});
break;
}
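
The AlignmentLowering changes above all follow one mechanical pattern: resolve the instruction's memory by name, take the index type from that memory rather than from a module-wide field, and thread curr->memory into every Builder::makeLoad/makeStore that gets emitted. A condensed sketch of that pattern, written as a free helper for clarity (the pass itself does this inline inside its visitors):

    #include "wasm-builder.h"
    using namespace wasm;

    // Sketch: lower a one-byte load against whichever memory it names.
    static Expression* lowerByteLoad(Module& module, Function* func, Load* curr) {
      auto* mem = module.getMemory(curr->memory);  // per-instruction memory
      auto indexType = mem->indexType;             // i32 or i64, per memory
      Builder builder(module);
      auto temp = Builder::addVar(func, indexType);
      auto* set = builder.makeLocalSet(temp, curr->ptr);
      // Every emitted load now names the memory it targets.
      auto* low = builder.makeLoad(1, false, curr->offset, 1,
                                   builder.makeLocalGet(temp, indexType),
                                   Type::i32, curr->memory);
      return builder.makeBlock({set, low});
    }
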
diff --git a/src/passes/Asyncify.cpp b/src/passes/Asyncify.cpp
index 8a50d49e6..b076c77d4 100644
--- a/src/passes/Asyncify.cpp
+++ b/src/passes/Asyncify.cpp
@@ -794,7 +794,9 @@ static bool doesCall(Expression* curr) {
class AsyncifyBuilder : public Builder {
public:
- AsyncifyBuilder(Module& wasm) : Builder(wasm) {}
+ Module& wasm;
+
+ AsyncifyBuilder(Module& wasm) : Builder(wasm), wasm(wasm) {}
Expression* makeGetStackPos() {
return makeLoad(4,
@@ -802,7 +804,8 @@ public:
int32_t(DataOffset::BStackPos),
4,
makeGlobalGet(ASYNCIFY_DATA, Type::i32),
- Type::i32);
+ Type::i32,
+ wasm.memories[0]->name);
}
Expression* makeIncStackPos(int32_t by) {
@@ -815,7 +818,8 @@ public:
4,
makeGlobalGet(ASYNCIFY_DATA, Type::i32),
makeBinary(AddInt32, makeGetStackPos(), makeConst(Literal(by))),
- Type::i32);
+ Type::i32,
+ wasm.memories[0]->name);
}
Expression* makeStateCheck(State value) {
@@ -1222,8 +1226,13 @@ struct AsyncifyLocals : public WalkerPass<PostWalker<AsyncifyLocals>> {
builder->makeIncStackPos(-4),
builder->makeLocalSet(
rewindIndex,
- builder->makeLoad(
- 4, false, 0, 4, builder->makeGetStackPos(), Type::i32))));
+ builder->makeLoad(4,
+ false,
+ 0,
+ 4,
+ builder->makeGetStackPos(),
+ Type::i32,
+ getModule()->memories[0]->name))));
} else if (curr->target == ASYNCIFY_CHECK_CALL_INDEX) {
replaceCurrent(builder->makeBinary(
EqInt32,
@@ -1392,7 +1401,8 @@ private:
offset,
STACK_ALIGN,
builder->makeLocalGet(tempIndex, Type::i32),
- type));
+ type,
+ getModule()->memories[0]->name));
offset += size;
}
Expression* load;
@@ -1440,7 +1450,8 @@ private:
STACK_ALIGN,
builder->makeLocalGet(tempIndex, Type::i32),
localGet,
- type));
+ type,
+ getModule()->memories[0]->name));
offset += size;
++j;
}
@@ -1458,7 +1469,8 @@ private:
4,
builder->makeGetStackPos(),
builder->makeLocalGet(tempIndex, Type::i32),
- Type::i32),
+ Type::i32,
+ getModule()->memories[0]->name),
builder->makeIncStackPos(4));
}
@@ -1483,7 +1495,7 @@ struct Asyncify : public Pass {
bool optimize = runner->options.optimizeLevel > 0;
// Ensure there is a memory, as we need it.
- MemoryUtils::ensureExists(module->memory);
+ MemoryUtils::ensureExists(module);
// Find which things can change the state.
auto stateChangingImports = String::trim(read_possible_response_file(
@@ -1659,14 +1671,16 @@ private:
int32_t(DataOffset::BStackPos),
4,
builder.makeGlobalGet(ASYNCIFY_DATA, Type::i32),
- Type::i32);
+ Type::i32,
+ module->memories[0]->name);
auto* stackEnd =
builder.makeLoad(4,
false,
int32_t(DataOffset::BStackEnd),
4,
builder.makeGlobalGet(ASYNCIFY_DATA, Type::i32),
- Type::i32);
+ Type::i32,
+ module->memories[0]->name);
body->list.push_back(
builder.makeIf(builder.makeBinary(GtUInt32, stackPos, stackEnd),
builder.makeUnreachable()));
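
Asyncify, by contrast, still assumes a single memory: it calls MemoryUtils::ensureExists(module), which now takes the whole module, and then addresses wasm.memories[0] directly instead of resolving a per-instruction name. A condensed sketch of the stack-position load under that single-memory assumption (ASYNCIFY_DATA and DataOffset::BStackPos are the pass-internal names used above):

    // Minimal sketch, assuming MemoryUtils::ensureExists(module) already ran
    // so memories[0] is guaranteed to exist.
    Expression* makeGetStackPos(Module& wasm, Builder& builder) {
      return builder.makeLoad(4, false, int32_t(DataOffset::BStackPos), 4,
                              builder.makeGlobalGet(ASYNCIFY_DATA, Type::i32),
                              Type::i32, wasm.memories[0]->name);
    }
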
diff --git a/src/passes/AvoidReinterprets.cpp b/src/passes/AvoidReinterprets.cpp
index c2607725d..feea4871f 100644
--- a/src/passes/AvoidReinterprets.cpp
+++ b/src/passes/AvoidReinterprets.cpp
@@ -115,11 +115,11 @@ struct AvoidReinterprets : public WalkerPass<PostWalker<AvoidReinterprets>> {
void optimize(Function* func) {
std::set<Load*> unoptimizables;
- auto indexType = getModule()->memory.indexType;
for (auto& [load, info] : infos) {
if (info.reinterpreted && canReplaceWithReinterpret(load)) {
// We should use another load here, to avoid reinterprets.
- info.ptrLocal = Builder::addVar(func, indexType);
+ auto mem = getModule()->getMemory(load->memory);
+ info.ptrLocal = Builder::addVar(func, mem->indexType);
info.reinterpretedLocal =
Builder::addVar(func, load->type.reinterpret());
} else {
@@ -173,7 +173,8 @@ struct AvoidReinterprets : public WalkerPass<PostWalker<AvoidReinterprets>> {
auto& info = iter->second;
Builder builder(*module);
auto* ptr = curr->ptr;
- auto indexType = getModule()->memory.indexType;
+ auto mem = getModule()->getMemory(curr->memory);
+ auto indexType = mem->indexType;
curr->ptr = builder.makeLocalGet(info.ptrLocal, indexType);
// Note that the other load can have its sign set to false - if the
// original were an integer, the other is a float anyhow; and if
@@ -195,7 +196,8 @@ struct AvoidReinterprets : public WalkerPass<PostWalker<AvoidReinterprets>> {
load->offset,
load->align,
ptr,
- load->type.reinterpret());
+ load->type.reinterpret(),
+ load->memory);
}
} finalOptimizer(infos, localGraph, getModule(), getPassOptions());
diff --git a/src/passes/I64ToI32Lowering.cpp b/src/passes/I64ToI32Lowering.cpp
index 11707d4f3..002e4568d 100644
--- a/src/passes/I64ToI32Lowering.cpp
+++ b/src/passes/I64ToI32Lowering.cpp
@@ -386,7 +386,8 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
curr->offset + 4,
std::min(uint32_t(curr->align), uint32_t(4)),
builder->makeLocalGet(ptrTemp, Type::i32),
- Type::i32));
+ Type::i32,
+ curr->memory));
} else if (curr->signed_) {
loadHigh = builder->makeLocalSet(
highBits,
@@ -432,7 +433,8 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
std::min(uint32_t(curr->align), uint32_t(4)),
builder->makeLocalGet(ptrTemp, Type::i32),
builder->makeLocalGet(highBits, Type::i32),
- Type::i32);
+ Type::i32,
+ curr->memory);
replaceCurrent(builder->blockify(setPtr, curr, storeHigh));
}
}
@@ -594,7 +596,7 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
Type::i32));
setOutParam(result, std::move(highBits));
replaceCurrent(result);
- MemoryUtils::ensureExists(getModule()->memory);
+ MemoryUtils::ensureExists(getModule());
ABI::wasm2js::ensureHelpers(getModule());
}
@@ -612,7 +614,7 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
Type::none),
builder->makeCall(ABI::wasm2js::SCRATCH_LOAD_F64, {}, Type::f64));
replaceCurrent(result);
- MemoryUtils::ensureExists(getModule()->memory);
+ MemoryUtils::ensureExists(getModule());
ABI::wasm2js::ensureHelpers(getModule());
}
diff --git a/src/passes/InstrumentMemory.cpp b/src/passes/InstrumentMemory.cpp
index 1180c5183..486bc7c1f 100644
--- a/src/passes/InstrumentMemory.cpp
+++ b/src/passes/InstrumentMemory.cpp
@@ -100,8 +100,9 @@ struct InstrumentMemory : public WalkerPass<PostWalker<InstrumentMemory>> {
void visitLoad(Load* curr) {
id++;
Builder builder(*getModule());
- auto indexType = getModule()->memory.indexType;
- auto offset = builder.makeConstPtr(curr->offset.addr);
+ auto mem = getModule()->getMemory(curr->memory);
+ auto indexType = mem->indexType;
+ auto offset = builder.makeConstPtr(curr->offset.addr, indexType);
curr->ptr = builder.makeCall(load_ptr,
{builder.makeConst(int32_t(id)),
builder.makeConst(int32_t(curr->bytes)),
@@ -132,8 +133,9 @@ struct InstrumentMemory : public WalkerPass<PostWalker<InstrumentMemory>> {
void visitStore(Store* curr) {
id++;
Builder builder(*getModule());
- auto indexType = getModule()->memory.indexType;
- auto offset = builder.makeConstPtr(curr->offset.addr);
+ auto mem = getModule()->getMemory(curr->memory);
+ auto indexType = mem->indexType;
+ auto offset = builder.makeConstPtr(curr->offset.addr, indexType);
curr->ptr = builder.makeCall(store_ptr,
{builder.makeConst(int32_t(id)),
builder.makeConst(int32_t(curr->bytes)),
@@ -246,7 +248,8 @@ struct InstrumentMemory : public WalkerPass<PostWalker<InstrumentMemory>> {
}
void visitModule(Module* curr) {
- auto indexType = curr->memory.indexType;
+ auto indexType =
+ curr->memories.empty() ? Type::i32 : curr->memories[0]->indexType;
// Load.
addImport(
diff --git a/src/passes/Memory64Lowering.cpp b/src/passes/Memory64Lowering.cpp
index 4c2770e3a..7e581c22b 100644
--- a/src/passes/Memory64Lowering.cpp
+++ b/src/passes/Memory64Lowering.cpp
@@ -30,79 +30,101 @@ namespace wasm {
struct Memory64Lowering : public WalkerPass<PostWalker<Memory64Lowering>> {
void run(PassRunner* runner, Module* module) override {
- if (module->memory.is64()) {
- super::run(runner, module);
- }
+ super::run(runner, module);
}
- void wrapAddress64(Expression*& ptr) {
+ void wrapAddress64(Expression*& ptr, Name memoryName) {
if (ptr->type == Type::unreachable) {
return;
}
auto& module = *getModule();
- assert(module.memory.is64());
- assert(ptr->type == Type::i64);
- Builder builder(module);
- ptr = builder.makeUnary(UnaryOp::WrapInt64, ptr);
+ auto memory = module.getMemory(memoryName);
+ if (memory->is64()) {
+ assert(ptr->type == Type::i64);
+ Builder builder(module);
+ ptr = builder.makeUnary(UnaryOp::WrapInt64, ptr);
+ }
}
- void extendAddress64(Expression*& ptr) {
+ void extendAddress64(Expression*& ptr, Name memoryName) {
if (ptr->type == Type::unreachable) {
return;
}
auto& module = *getModule();
- assert(module.memory.is64());
- assert(ptr->type == Type::i64);
- ptr->type = Type::i32;
- Builder builder(module);
- ptr = builder.makeUnary(UnaryOp::ExtendUInt32, ptr);
+ auto memory = module.getMemory(memoryName);
+ if (memory->is64()) {
+ assert(ptr->type == Type::i64);
+ ptr->type = Type::i32;
+ Builder builder(module);
+ ptr = builder.makeUnary(UnaryOp::ExtendUInt32, ptr);
+ }
}
- void visitLoad(Load* curr) { wrapAddress64(curr->ptr); }
+ void visitLoad(Load* curr) { wrapAddress64(curr->ptr, curr->memory); }
- void visitStore(Store* curr) { wrapAddress64(curr->ptr); }
+ void visitStore(Store* curr) { wrapAddress64(curr->ptr, curr->memory); }
void visitMemorySize(MemorySize* curr) {
- auto size = static_cast<Expression*>(curr);
- extendAddress64(size);
- curr->ptrType = Type::i32;
- replaceCurrent(size);
+ auto& module = *getModule();
+ auto memory = module.getMemory(curr->memory);
+ if (memory->is64()) {
+ auto size = static_cast<Expression*>(curr);
+ extendAddress64(size, curr->memory);
+ curr->ptrType = Type::i32;
+ replaceCurrent(size);
+ }
}
void visitMemoryGrow(MemoryGrow* curr) {
- wrapAddress64(curr->delta);
- auto size = static_cast<Expression*>(curr);
- extendAddress64(size);
- curr->ptrType = Type::i32;
- replaceCurrent(size);
+ auto& module = *getModule();
+ auto memory = module.getMemory(curr->memory);
+ if (memory->is64()) {
+ wrapAddress64(curr->delta, curr->memory);
+ auto size = static_cast<Expression*>(curr);
+ extendAddress64(size, curr->memory);
+ curr->ptrType = Type::i32;
+ replaceCurrent(size);
+ }
}
- void visitMemoryInit(MemoryInit* curr) { wrapAddress64(curr->dest); }
+ void visitMemoryInit(MemoryInit* curr) {
+ wrapAddress64(curr->dest, curr->memory);
+ }
void visitMemoryFill(MemoryFill* curr) {
- wrapAddress64(curr->dest);
- wrapAddress64(curr->size);
+ wrapAddress64(curr->dest, curr->memory);
+ wrapAddress64(curr->size, curr->memory);
}
void visitMemoryCopy(MemoryCopy* curr) {
- wrapAddress64(curr->dest);
- wrapAddress64(curr->source);
- wrapAddress64(curr->size);
+ wrapAddress64(curr->dest, curr->destMemory);
+ wrapAddress64(curr->source, curr->sourceMemory);
+ wrapAddress64(curr->size, curr->destMemory);
}
- void visitAtomicRMW(AtomicRMW* curr) { wrapAddress64(curr->ptr); }
+ void visitAtomicRMW(AtomicRMW* curr) {
+ wrapAddress64(curr->ptr, curr->memory);
+ }
- void visitAtomicCmpxchg(AtomicCmpxchg* curr) { wrapAddress64(curr->ptr); }
+ void visitAtomicCmpxchg(AtomicCmpxchg* curr) {
+ wrapAddress64(curr->ptr, curr->memory);
+ }
- void visitAtomicWait(AtomicWait* curr) { wrapAddress64(curr->ptr); }
+ void visitAtomicWait(AtomicWait* curr) {
+ wrapAddress64(curr->ptr, curr->memory);
+ }
- void visitAtomicNotify(AtomicNotify* curr) { wrapAddress64(curr->ptr); }
+ void visitAtomicNotify(AtomicNotify* curr) {
+ wrapAddress64(curr->ptr, curr->memory);
+ }
void visitMemory(Memory* memory) {
// This is visited last.
- memory->indexType = Type::i32;
- if (memory->hasMax() && memory->max > Memory::kMaxSize32) {
- memory->max = Memory::kMaxSize32;
+ if (memory->is64()) {
+ memory->indexType = Type::i32;
+ if (memory->hasMax() && memory->max > Memory::kMaxSize32) {
+ memory->max = Memory::kMaxSize32;
+ }
}
}
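
Memory64Lowering no longer bails out up front on 32-bit modules; each helper looks up the memory an instruction names and only rewrites the address when that memory is 64-bit, so the pass becomes a safe no-op for memories that are already 32-bit. A small usage sketch, assuming a parsed Module named wasm and the pass's usual registered name in the pass registry:

    // Sketch: run the lowering unconditionally; 32-bit memories are untouched.
    PassRunner runner(&wasm);
    runner.add("memory64-lowering");
    runner.run();
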
diff --git a/src/passes/MemoryPacking.cpp b/src/passes/MemoryPacking.cpp
index 3be08e7b9..cd4a6698d 100644
--- a/src/passes/MemoryPacking.cpp
+++ b/src/passes/MemoryPacking.cpp
@@ -83,24 +83,26 @@ const size_t DATA_DROP_SIZE = 3;
Expression*
makeGtShiftedMemorySize(Builder& builder, Module& module, MemoryInit* curr) {
+ auto mem = module.getMemory(curr->memory);
return builder.makeBinary(
- module.memory.is64() ? GtUInt64 : GtUInt32,
+ mem->is64() ? GtUInt64 : GtUInt32,
curr->dest,
- builder.makeBinary(module.memory.is64() ? ShlInt64 : ShlInt32,
- builder.makeMemorySize(),
- builder.makeConstPtr(16)));
+ builder.makeBinary(mem->is64() ? ShlInt64 : ShlInt32,
+ builder.makeMemorySize(mem->name),
+ builder.makeConstPtr(16, mem->indexType)));
}
} // anonymous namespace
struct MemoryPacking : public Pass {
void run(PassRunner* runner, Module* module) override;
- bool canOptimize(const Memory& memory,
+ bool canOptimize(std::vector<std::unique_ptr<Memory>>& memories,
std::vector<std::unique_ptr<DataSegment>>& dataSegments,
const PassOptions& passOptions);
void optimizeBulkMemoryOps(PassRunner* runner, Module* module);
void getSegmentReferrers(Module* module, ReferrersMap& referrers);
- void dropUnusedSegments(std::vector<std::unique_ptr<DataSegment>>& segments,
+ void dropUnusedSegments(Module* module,
+ std::vector<std::unique_ptr<DataSegment>>& segments,
ReferrersMap& referrers);
bool canSplit(const std::unique_ptr<DataSegment>& segment,
const Referrers& referrers);
@@ -123,7 +125,8 @@ struct MemoryPacking : public Pass {
};
void MemoryPacking::run(PassRunner* runner, Module* module) {
- if (!canOptimize(module->memory, module->dataSegments, runner->options)) {
+  // This pass does not yet support multiple memories.

+ if (!canOptimize(module->memories, module->dataSegments, runner->options)) {
return;
}
@@ -140,7 +143,7 @@ void MemoryPacking::run(PassRunner* runner, Module* module) {
// like, such as memory.inits not having both zero offset and size.
optimizeBulkMemoryOps(runner, module);
getSegmentReferrers(module, referrers);
- dropUnusedSegments(segments, referrers);
+ dropUnusedSegments(module, segments, referrers);
}
// The new, split memory segments
@@ -171,6 +174,7 @@ void MemoryPacking::run(PassRunner* runner, Module* module) {
}
segments.swap(packed);
+ module->updateDataSegmentsMap();
if (module->features.hasBulkMemory()) {
replaceBulkMemoryOps(runner, module, replacements);
@@ -178,17 +182,17 @@ void MemoryPacking::run(PassRunner* runner, Module* module) {
}
bool MemoryPacking::canOptimize(
- const Memory& memory,
+ std::vector<std::unique_ptr<Memory>>& memories,
std::vector<std::unique_ptr<DataSegment>>& dataSegments,
const PassOptions& passOptions) {
- if (!memory.exists) {
+ if (memories.empty() || memories.size() > 1) {
return false;
}
-
+ auto& memory = memories[0];
// We must optimize under the assumption that memory has been initialized to
// zero. That is the case for a memory declared in the module, but for a
// memory that is imported, we must be told that it is zero-initialized.
- if (memory.imported() && !passOptions.zeroFilledMemory) {
+ if (memory->imported() && !passOptions.zeroFilledMemory) {
return false;
}
@@ -472,6 +476,7 @@ void MemoryPacking::getSegmentReferrers(Module* module,
}
void MemoryPacking::dropUnusedSegments(
+ Module* module,
std::vector<std::unique_ptr<DataSegment>>& segments,
ReferrersMap& referrers) {
std::vector<std::unique_ptr<DataSegment>> usedSegments;
@@ -508,6 +513,7 @@ void MemoryPacking::dropUnusedSegments(
}
}
std::swap(segments, usedSegments);
+ module->updateDataSegmentsMap();
std::swap(referrers, usedReferrers);
}
@@ -563,6 +569,7 @@ void MemoryPacking::createSplitSegments(
segmentCount++;
}
auto curr = Builder::makeDataSegment(name,
+ segment->memory,
segment->isPassive,
offset,
&segment->data[range.start],
@@ -711,11 +718,12 @@ void MemoryPacking::createReplacements(Module* module,
// Create new memory.init or memory.fill
if (range.isZero) {
Expression* value = builder.makeConst(Literal::makeZero(Type::i32));
- appendResult(builder.makeMemoryFill(dest, value, size));
+ appendResult(builder.makeMemoryFill(dest, value, size, init->memory));
} else {
size_t offsetBytes = std::max(start, range.start) - range.start;
Expression* offset = builder.makeConst(int32_t(offsetBytes));
- appendResult(builder.makeMemoryInit(initIndex, dest, offset, size));
+ appendResult(
+ builder.makeMemoryInit(initIndex, dest, offset, size, init->memory));
initIndex++;
}
}
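
MemoryPacking explicitly opts out unless the module has exactly one memory, and it refreshes the module's data-segment name map each time the segment vector is rebuilt. A sketch of the new guard, mirroring canOptimize above:

    // Sketch of the packing guard (mirrors MemoryPacking::canOptimize).
    static bool canPack(Module& module, const PassOptions& options) {
      if (module.memories.size() != 1) {
        return false;  // multi-memory packing is not supported yet
      }
      auto& memory = module.memories[0];
      // An imported memory must be declared zero-filled for packing to be safe.
      return !memory->imported() || options.zeroFilledMemory;
    }
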
diff --git a/src/passes/Metrics.cpp b/src/passes/Metrics.cpp
index 70d79c12c..480273da9 100644
--- a/src/passes/Metrics.cpp
+++ b/src/passes/Metrics.cpp
@@ -54,22 +54,25 @@ struct Metrics
ModuleUtils::iterDefinedGlobals(*module,
[&](Global* curr) { walkGlobal(curr); });
- // add imports / funcs / globals / exports / tables
+ // add imports / funcs / globals / exports / tables / memories
counts["[imports]"] = imports.getNumImports();
counts["[funcs]"] = imports.getNumDefinedFunctions();
counts["[globals]"] = imports.getNumDefinedGlobals();
counts["[tags]"] = imports.getNumDefinedTags();
counts["[exports]"] = module->exports.size();
counts["[tables]"] = imports.getNumDefinedTables();
+ counts["[memories]"] = imports.getNumDefinedMemories();
// add memory
- walkMemory(&module->memory);
+ for (auto& memory : module->memories) {
+ walkMemory(memory.get());
+ }
Index size = 0;
for (auto& segment : module->dataSegments) {
walkDataSegment(segment.get());
size += segment->data.size();
}
- if (!module->dataSegments.empty()) {
+ if (!module->memories.empty()) {
counts["[memory-data]"] = size;
}
diff --git a/src/passes/OptimizeInstructions.cpp b/src/passes/OptimizeInstructions.cpp
index e3a5774b1..4fe26e8ee 100644
--- a/src/passes/OptimizeInstructions.cpp
+++ b/src/passes/OptimizeInstructions.cpp
@@ -1159,14 +1159,14 @@ struct OptimizeInstructions
if (curr->type == Type::unreachable) {
return;
}
- optimizeMemoryAccess(curr->ptr, curr->offset);
+ optimizeMemoryAccess(curr->ptr, curr->offset, curr->memory);
}
void visitStore(Store* curr) {
if (curr->type == Type::unreachable) {
return;
}
- optimizeMemoryAccess(curr->ptr, curr->offset);
+ optimizeMemoryAccess(curr->ptr, curr->offset, curr->memory);
optimizeStoredValue(curr->value, curr->bytes);
if (auto* unary = curr->value->dynCast<Unary>()) {
if (unary->op == WrapInt64) {
@@ -2912,7 +2912,7 @@ private:
}
// fold constant factors into the offset
- void optimizeMemoryAccess(Expression*& ptr, Address& offset) {
+ void optimizeMemoryAccess(Expression*& ptr, Address& offset, Name memory) {
// ptr may be a const, but it isn't worth folding that in (we still have a
// const); in fact, it's better to do the opposite for gzip purposes as well
// as for readability.
@@ -2920,7 +2920,8 @@ private:
if (last) {
uint64_t value64 = last->value.getInteger();
uint64_t offset64 = offset;
- if (getModule()->memory.is64()) {
+ auto mem = getModule()->getMemory(memory);
+ if (mem->is64()) {
last->value = Literal(int64_t(value64 + offset64));
offset = 0;
} else {
@@ -3797,36 +3798,53 @@ private:
case 1:
case 2:
case 4: {
- return builder.makeStore(
- bytes, // bytes
- 0, // offset
- 1, // align
- memCopy->dest,
- builder.makeLoad(bytes, false, 0, 1, memCopy->source, Type::i32),
- Type::i32);
+ return builder.makeStore(bytes, // bytes
+ 0, // offset
+ 1, // align
+ memCopy->dest,
+ builder.makeLoad(bytes,
+ false,
+ 0,
+ 1,
+ memCopy->source,
+ Type::i32,
+ memCopy->sourceMemory),
+ Type::i32,
+ memCopy->destMemory);
}
case 8: {
- return builder.makeStore(
- bytes, // bytes
- 0, // offset
- 1, // align
- memCopy->dest,
- builder.makeLoad(bytes, false, 0, 1, memCopy->source, Type::i64),
- Type::i64);
+ return builder.makeStore(bytes, // bytes
+ 0, // offset
+ 1, // align
+ memCopy->dest,
+ builder.makeLoad(bytes,
+ false,
+ 0,
+ 1,
+ memCopy->source,
+ Type::i64,
+ memCopy->sourceMemory),
+ Type::i64,
+ memCopy->destMemory);
}
case 16: {
if (options.shrinkLevel == 0) {
// This adds an extra 2 bytes so apply it only for
// minimal shrink level
if (getModule()->features.hasSIMD()) {
- return builder.makeStore(
- bytes, // bytes
- 0, // offset
- 1, // align
- memCopy->dest,
- builder.makeLoad(
- bytes, false, 0, 1, memCopy->source, Type::v128),
- Type::v128);
+ return builder.makeStore(bytes, // bytes
+ 0, // offset
+ 1, // align
+ memCopy->dest,
+ builder.makeLoad(bytes,
+ false,
+ 0,
+ 1,
+ memCopy->source,
+ Type::v128,
+ memCopy->sourceMemory),
+ Type::v128,
+ memCopy->destMemory);
}
}
break;
@@ -3873,7 +3891,8 @@ private:
align,
memFill->dest,
builder.makeConst<uint32_t>(value),
- Type::i32);
+ Type::i32,
+ memFill->memory);
}
case 2: {
return builder.makeStore(2,
@@ -3881,7 +3900,8 @@ private:
align,
memFill->dest,
builder.makeConst<uint32_t>(value * 0x0101U),
- Type::i32);
+ Type::i32,
+ memFill->memory);
}
case 4: {
// transform only when "value" or shrinkLevel equal to zero due to
@@ -3893,7 +3913,8 @@ private:
align,
memFill->dest,
builder.makeConst<uint32_t>(value * 0x01010101U),
- Type::i32);
+ Type::i32,
+ memFill->memory);
}
break;
}
@@ -3907,7 +3928,8 @@ private:
align,
memFill->dest,
builder.makeConst<uint64_t>(value * 0x0101010101010101ULL),
- Type::i64);
+ Type::i64,
+ memFill->memory);
}
break;
}
@@ -3921,7 +3943,8 @@ private:
align,
memFill->dest,
builder.makeConst<uint8_t[16]>(values),
- Type::v128);
+ Type::v128,
+ memFill->memory);
} else {
// { i64.store(d, C', 0), i64.store(d, C', 8) }
auto destType = memFill->dest->type;
@@ -3933,14 +3956,16 @@ private:
align,
builder.makeLocalTee(tempLocal, memFill->dest, destType),
builder.makeConst<uint64_t>(value * 0x0101010101010101ULL),
- Type::i64),
+ Type::i64,
+ memFill->memory),
builder.makeStore(
8,
offset + 8,
align,
builder.makeLocalGet(tempLocal, destType),
builder.makeConst<uint64_t>(value * 0x0101010101010101ULL),
- Type::i64),
+ Type::i64,
+ memFill->memory),
});
}
}
@@ -3952,8 +3977,13 @@ private:
}
// memory.fill(d, v, 1) ==> store8(d, v)
if (bytes == 1LL) {
- return builder.makeStore(
- 1, offset, align, memFill->dest, memFill->value, Type::i32);
+ return builder.makeStore(1,
+ offset,
+ align,
+ memFill->dest,
+ memFill->value,
+ Type::i32,
+ memFill->memory);
}
return nullptr;
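
In OptimizeInstructions, folding a constant pointer term into the load/store offset now consults the memory actually being accessed, since a 64-bit fold is only valid when that memory is 64-bit. A condensed sketch of the decision; the 32-bit branch keeps its existing overflow checks and is elided here:

    // Sketch: offset folding keys off the accessed memory, not a global flag.
    static void foldConstIntoOffset(Module& wasm, Const* last, Address& offset,
                                    Name memoryName) {
      auto* mem = wasm.getMemory(memoryName);
      uint64_t value64 = last->value.getInteger();
      uint64_t offset64 = offset;
      if (mem->is64()) {
        last->value = Literal(int64_t(value64 + offset64));
        offset = 0;
      }
    }
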
diff --git a/src/passes/Print.cpp b/src/passes/Print.cpp
index 62dc58c48..87c33ed93 100644
--- a/src/passes/Print.cpp
+++ b/src/passes/Print.cpp
@@ -59,6 +59,14 @@ std::ostream& printName(Name name, std::ostream& o) {
return o;
}
+std::ostream& printMemoryName(Name name, std::ostream& o, Module* wasm) {
+ if (!wasm || wasm->memories.size() > 1) {
+ o << ' ';
+ printName(name, o);
+ }
+ return o;
+}
+
static std::ostream& printLocal(Index index, Function* func, std::ostream& o) {
Name name;
if (func) {
@@ -521,6 +529,7 @@ struct PrintExpressionContents
o << (curr->signed_ ? "_s" : "_u");
}
restoreNormalColor(o);
+ printMemoryName(curr->memory, o, wasm);
if (curr->offset) {
o << " offset=" << curr->offset;
}
@@ -546,6 +555,7 @@ struct PrintExpressionContents
}
}
restoreNormalColor(o);
+ printMemoryName(curr->memory, o, wasm);
if (curr->offset) {
o << " offset=" << curr->offset;
}
@@ -596,6 +606,7 @@ struct PrintExpressionContents
o << "_u";
}
restoreNormalColor(o);
+ printMemoryName(curr->memory, o, wasm);
if (curr->offset) {
o << " offset=" << curr->offset;
}
@@ -609,6 +620,7 @@ struct PrintExpressionContents
o << "_u";
}
restoreNormalColor(o);
+ printMemoryName(curr->memory, o, wasm);
if (curr->offset) {
o << " offset=" << curr->offset;
}
@@ -619,12 +631,14 @@ struct PrintExpressionContents
assert(type == Type::i32 || type == Type::i64);
o << "memory.atomic.wait" << (type == Type::i32 ? "32" : "64");
restoreNormalColor(o);
+ printMemoryName(curr->memory, o, wasm);
if (curr->offset) {
o << " offset=" << curr->offset;
}
}
void visitAtomicNotify(AtomicNotify* curr) {
printMedium(o, "memory.atomic.notify");
+ printMemoryName(curr->memory, o, wasm);
if (curr->offset) {
o << " offset=" << curr->offset;
}
@@ -813,6 +827,7 @@ struct PrintExpressionContents
break;
}
restoreNormalColor(o);
+ printMemoryName(curr->memory, o, wasm);
if (curr->offset) {
o << " offset=" << curr->offset;
}
@@ -849,6 +864,7 @@ struct PrintExpressionContents
break;
}
restoreNormalColor(o);
+ printMemoryName(curr->memory, o, wasm);
if (curr->offset) {
o << " offset=" << curr->offset;
}
@@ -861,6 +877,7 @@ struct PrintExpressionContents
prepareColor(o);
o << "memory.init";
restoreNormalColor(o);
+ printMemoryName(curr->memory, o, wasm);
o << ' ' << curr->segment;
}
void visitDataDrop(DataDrop* curr) {
@@ -873,11 +890,14 @@ struct PrintExpressionContents
prepareColor(o);
o << "memory.copy";
restoreNormalColor(o);
+ printMemoryName(curr->destMemory, o, wasm);
+ printMemoryName(curr->sourceMemory, o, wasm);
}
void visitMemoryFill(MemoryFill* curr) {
prepareColor(o);
o << "memory.fill";
restoreNormalColor(o);
+ printMemoryName(curr->memory, o, wasm);
}
void visitConst(Const* curr) {
o << curr->value.type << ".const " << curr->value;
@@ -1917,8 +1937,14 @@ struct PrintExpressionContents
}
void visitDrop(Drop* curr) { printMedium(o, "drop"); }
void visitReturn(Return* curr) { printMedium(o, "return"); }
- void visitMemorySize(MemorySize* curr) { printMedium(o, "memory.size"); }
- void visitMemoryGrow(MemoryGrow* curr) { printMedium(o, "memory.grow"); }
+ void visitMemorySize(MemorySize* curr) {
+ printMedium(o, "memory.size");
+ printMemoryName(curr->memory, o, wasm);
+ }
+ void visitMemoryGrow(MemoryGrow* curr) {
+ printMedium(o, "memory.grow");
+ printMemoryName(curr->memory, o, wasm);
+ }
void visitRefNull(RefNull* curr) {
printMedium(o, "ref.null ");
printHeapType(o, curr->type.getHeapType(), wasm);
@@ -3129,14 +3155,11 @@ struct PrintSExpression : public UnifiedExpressionVisitor<PrintSExpression> {
o << ")";
}
void visitMemory(Memory* curr) {
- if (!curr->exists) {
- return;
- }
if (curr->imported()) {
doIndent(o, indent);
o << '(';
emitImportHeader(curr);
- printMemoryHeader(&currModule->memory);
+ printMemoryHeader(curr);
o << ')' << maybeNewLine;
} else {
doIndent(o, indent);
@@ -3390,6 +3413,7 @@ public:
PrintSExpression print(o);
print.setFull(true);
print.setDebugInfo(runner->options.debugInfo);
+ print.currModule = module;
print.visitModule(module);
}
};
@@ -3407,6 +3431,7 @@ public:
PrintSExpression print(o);
print.setDebugInfo(runner->options.debugInfo);
print.setStackIR(true);
+ print.currModule = module;
print.visitModule(module);
}
};
diff --git a/src/passes/RemoveNonJSOps.cpp b/src/passes/RemoveNonJSOps.cpp
index 1c112e760..84df4ec1e 100644
--- a/src/passes/RemoveNonJSOps.cpp
+++ b/src/passes/RemoveNonJSOps.cpp
@@ -122,7 +122,7 @@ struct RemoveNonJSOpsPass : public WalkerPass<PostWalker<RemoveNonJSOpsPass>> {
}
// Intrinsics may use memory, so ensure the module has one.
- MemoryUtils::ensureExists(module->memory);
+ MemoryUtils::ensureExists(module);
// Add missing globals
for (auto& [name, type] : neededImportedGlobals) {
diff --git a/src/passes/RemoveUnusedModuleElements.cpp b/src/passes/RemoveUnusedModuleElements.cpp
index 7116475cd..1b1174745 100644
--- a/src/passes/RemoveUnusedModuleElements.cpp
+++ b/src/passes/RemoveUnusedModuleElements.cpp
@@ -38,6 +38,7 @@ typedef std::pair<ModuleElementKind, Name> ModuleElement;
// Finds reachabilities
// TODO: use Effects to determine if a memory is used
+// This pass does not yet support multiple memories.
struct ReachabilityAnalyzer : public PostWalker<ReachabilityAnalyzer> {
Module* module;
@@ -281,7 +282,7 @@ struct RemoveUnusedModuleElements : public Pass {
}
// Check for special imports, which are roots.
bool importsMemory = false;
- if (module->memory.imported()) {
+ if (!module->memories.empty() && module->memories[0]->imported()) {
importsMemory = true;
}
// For now, all functions that can be called indirectly are marked as roots.
@@ -367,13 +368,10 @@ struct RemoveUnusedModuleElements : public Pass {
if (!importsMemory) {
// The memory is unobservable to the outside, we can remove the
// contents.
- module->dataSegments.clear();
+ module->removeDataSegments([&](DataSegment* curr) { return true; });
}
- if (module->dataSegments.empty()) {
- module->memory.exists = false;
- module->memory.module = module->memory.base = Name();
- module->memory.initial = 0;
- module->memory.max = 0;
+ if (module->dataSegments.empty() && !module->memories.empty()) {
+ module->removeMemory(module->memories[0]->name);
}
}
}
diff --git a/src/passes/SafeHeap.cpp b/src/passes/SafeHeap.cpp
index 068c8ef73..eccb521d2 100644
--- a/src/passes/SafeHeap.cpp
+++ b/src/passes/SafeHeap.cpp
@@ -82,10 +82,11 @@ struct AccessInstrumenter : public WalkerPass<PostWalker<AccessInstrumenter>> {
return;
}
Builder builder(*getModule());
- replaceCurrent(
- builder.makeCall(getLoadName(curr),
- {curr->ptr, builder.makeConstPtr(curr->offset.addr)},
- curr->type));
+ auto memory = getModule()->getMemory(curr->memory);
+ replaceCurrent(builder.makeCall(
+ getLoadName(curr),
+ {curr->ptr, builder.makeConstPtr(curr->offset.addr, memory->indexType)},
+ curr->type));
}
void visitStore(Store* curr) {
@@ -94,9 +95,12 @@ struct AccessInstrumenter : public WalkerPass<PostWalker<AccessInstrumenter>> {
return;
}
Builder builder(*getModule());
+ auto memory = getModule()->getMemory(curr->memory);
replaceCurrent(builder.makeCall(
getStoreName(curr),
- {curr->ptr, builder.makeConstPtr(curr->offset.addr), curr->value},
+ {curr->ptr,
+ builder.makeConstPtr(curr->offset.addr, memory->indexType),
+ curr->value},
Type::none));
}
};
@@ -131,6 +135,7 @@ struct SafeHeap : public Pass {
void run(PassRunner* runner, Module* module) override {
options = runner->options;
+ assert(!module->memories.empty());
// add imports
addImports(module);
// instrument loads and stores
@@ -151,7 +156,7 @@ struct SafeHeap : public Pass {
void addImports(Module* module) {
ImportInfo info(*module);
- auto indexType = module->memory.indexType;
+ auto indexType = module->memories[0]->indexType;
if (auto* existing = info.getImportedFunction(ENV, GET_SBRK_PTR)) {
getSbrkPtr = existing->name;
} else if (auto* existing = module->getExportOrNull(GET_SBRK_PTR)) {
@@ -202,6 +207,7 @@ struct SafeHeap : public Pass {
continue;
}
load.type = type;
+ load.memory = module->memories[0]->name;
for (Index bytes : {1, 2, 4, 8, 16}) {
load.bytes = bytes;
if (bytes > type.getByteSize() || (type == Type::f32 && bytes != 4) ||
@@ -221,8 +227,9 @@ struct SafeHeap : public Pass {
}
for (auto isAtomic : {true, false}) {
load.isAtomic = isAtomic;
- if (isAtomic && !isPossibleAtomicOperation(
- align, bytes, module->memory.shared, type)) {
+ if (isAtomic &&
+ !isPossibleAtomicOperation(
+ align, bytes, module->memories[0]->shared, type)) {
continue;
}
addLoadFunc(load, module);
@@ -240,6 +247,7 @@ struct SafeHeap : public Pass {
}
store.valueType = valueType;
store.type = Type::none;
+ store.memory = module->memories[0]->name;
for (Index bytes : {1, 2, 4, 8, 16}) {
store.bytes = bytes;
if (bytes > valueType.getByteSize() ||
@@ -255,8 +263,9 @@ struct SafeHeap : public Pass {
}
for (auto isAtomic : {true, false}) {
store.isAtomic = isAtomic;
- if (isAtomic && !isPossibleAtomicOperation(
- align, bytes, module->memory.shared, valueType)) {
+ if (isAtomic &&
+ !isPossibleAtomicOperation(
+ align, bytes, module->memories[0]->shared, valueType)) {
continue;
}
addStoreFunc(store, module);
@@ -273,22 +282,30 @@ struct SafeHeap : public Pass {
return;
}
// pointer, offset
- auto indexType = module->memory.indexType;
+ auto memory = module->getMemory(style.memory);
+ auto indexType = memory->indexType;
auto funcSig = Signature({indexType, indexType}, style.type);
auto func = Builder::makeFunction(name, funcSig, {indexType});
Builder builder(*module);
auto* block = builder.makeBlock();
block->list.push_back(builder.makeLocalSet(
2,
- builder.makeBinary(module->memory.is64() ? AddInt64 : AddInt32,
+ builder.makeBinary(memory->is64() ? AddInt64 : AddInt32,
builder.makeLocalGet(0, indexType),
builder.makeLocalGet(1, indexType))));
// check for reading past valid memory: if pointer + offset + bytes
- block->list.push_back(
- makeBoundsCheck(style.type, builder, 2, style.bytes, module));
+ block->list.push_back(makeBoundsCheck(style.type,
+ builder,
+ 2,
+ style.bytes,
+ module,
+ memory->indexType,
+ memory->is64(),
+ memory->name));
// check proper alignment
if (style.align > 1) {
- block->list.push_back(makeAlignCheck(style.align, builder, 2, module));
+ block->list.push_back(
+ makeAlignCheck(style.align, builder, 2, module, memory->name));
}
// do the load
auto* load = module->allocator.alloc<Load>();
@@ -312,7 +329,9 @@ struct SafeHeap : public Pass {
if (module->getFunctionOrNull(name)) {
return;
}
- auto indexType = module->memory.indexType;
+ auto memory = module->getMemory(style.memory);
+ auto indexType = memory->indexType;
+ bool is64 = memory->is64();
// pointer, offset, value
auto funcSig =
Signature({indexType, indexType, style.valueType}, Type::none);
@@ -321,19 +340,27 @@ struct SafeHeap : public Pass {
auto* block = builder.makeBlock();
block->list.push_back(builder.makeLocalSet(
3,
- builder.makeBinary(module->memory.is64() ? AddInt64 : AddInt32,
+ builder.makeBinary(is64 ? AddInt64 : AddInt32,
builder.makeLocalGet(0, indexType),
builder.makeLocalGet(1, indexType))));
// check for reading past valid memory: if pointer + offset + bytes
- block->list.push_back(
- makeBoundsCheck(style.valueType, builder, 3, style.bytes, module));
+ block->list.push_back(makeBoundsCheck(style.valueType,
+ builder,
+ 3,
+ style.bytes,
+ module,
+ indexType,
+ is64,
+ memory->name));
// check proper alignment
if (style.align > 1) {
- block->list.push_back(makeAlignCheck(style.align, builder, 3, module));
+ block->list.push_back(
+ makeAlignCheck(style.align, builder, 3, module, memory->name));
}
// do the store
auto* store = module->allocator.alloc<Store>();
*store = style; // basically the same as the template we are given!
+ store->memory = memory->name;
store->ptr = builder.makeLocalGet(3, indexType);
store->value = builder.makeLocalGet(2, style.valueType);
block->list.push_back(store);
@@ -342,11 +369,15 @@ struct SafeHeap : public Pass {
module->addFunction(std::move(func));
}
- Expression*
- makeAlignCheck(Address align, Builder& builder, Index local, Module* module) {
- auto indexType = module->memory.indexType;
+ Expression* makeAlignCheck(Address align,
+ Builder& builder,
+ Index local,
+ Module* module,
+ Name memoryName) {
+ auto memory = module->getMemory(memoryName);
+ auto indexType = memory->indexType;
Expression* ptrBits = builder.makeLocalGet(local, indexType);
- if (module->memory.is64()) {
+ if (memory->is64()) {
ptrBits = builder.makeUnary(WrapInt64, ptrBits);
}
return builder.makeIf(
@@ -355,17 +386,21 @@ struct SafeHeap : public Pass {
builder.makeCall(alignfault, {}, Type::none));
}
- Expression* makeBoundsCheck(
- Type type, Builder& builder, Index local, Index bytes, Module* module) {
- auto indexType = module->memory.indexType;
- auto upperOp = module->memory.is64()
- ? options.lowMemoryUnused ? LtUInt64 : EqInt64
- : options.lowMemoryUnused ? LtUInt32 : EqInt32;
+ Expression* makeBoundsCheck(Type type,
+ Builder& builder,
+ Index local,
+ Index bytes,
+ Module* module,
+ Type indexType,
+ bool is64,
+ Name memory) {
+ auto upperOp = is64 ? options.lowMemoryUnused ? LtUInt64 : EqInt64
+ : options.lowMemoryUnused ? LtUInt32 : EqInt32;
auto upperBound = options.lowMemoryUnused ? PassOptions::LowMemoryBound : 0;
Expression* brkLocation;
if (sbrk.is()) {
brkLocation =
- builder.makeCall(sbrk, {builder.makeConstPtr(0)}, indexType);
+ builder.makeCall(sbrk, {builder.makeConstPtr(0, indexType)}, indexType);
} else {
Expression* sbrkPtr;
if (dynamicTopPtr.is()) {
@@ -373,22 +408,23 @@ struct SafeHeap : public Pass {
} else {
sbrkPtr = builder.makeCall(getSbrkPtr, {}, indexType);
}
- auto size = module->memory.is64() ? 8 : 4;
- brkLocation = builder.makeLoad(size, false, 0, size, sbrkPtr, indexType);
+ auto size = is64 ? 8 : 4;
+ brkLocation =
+ builder.makeLoad(size, false, 0, size, sbrkPtr, indexType, memory);
}
- auto gtuOp = module->memory.is64() ? GtUInt64 : GtUInt32;
- auto addOp = module->memory.is64() ? AddInt64 : AddInt32;
+ auto gtuOp = is64 ? GtUInt64 : GtUInt32;
+ auto addOp = is64 ? AddInt64 : AddInt32;
return builder.makeIf(
builder.makeBinary(
OrInt32,
builder.makeBinary(upperOp,
builder.makeLocalGet(local, indexType),
- builder.makeConstPtr(upperBound)),
+ builder.makeConstPtr(upperBound, indexType)),
builder.makeBinary(
gtuOp,
builder.makeBinary(addOp,
builder.makeLocalGet(local, indexType),
- builder.makeConstPtr(bytes)),
+ builder.makeConstPtr(bytes, indexType)),
brkLocation)),
builder.makeCall(segfault, {}, Type::none));
}
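
Editorial note: the SafeHeap hunks above all follow one pattern — resolve the memory by name, derive the index type and the 32/64-bit opcode variants from it, and pass the name along to every load or store that gets emitted. Below is a minimal sketch of that pattern in isolation, using only the builder calls visible in this patch; the helper name makeCheckedLoad and the constant offset are illustrative, not part of the pass.

#include "wasm.h"
#include "wasm-builder.h"

using namespace wasm;

// Load `bytes` bytes from the address held in local `local` of the memory
// named `memoryName`, using that memory's own index type for the pointer math.
static Expression* makeCheckedLoad(Module& module,
                                   Index local,
                                   Index bytes,
                                   Name memoryName) {
  Builder builder(module);
  auto* memory = module.getMemory(memoryName);
  auto indexType = memory->indexType;                 // i32 or i64
  auto addOp = memory->is64() ? AddInt64 : AddInt32;
  // pointer + a small constant offset, in the memory's index type
  auto* addr = builder.makeBinary(addOp,
                                  builder.makeLocalGet(local, indexType),
                                  builder.makeConstPtr(8, indexType));
  // loads and stores now name the memory they access explicitly
  return builder.makeLoad(bytes, false, /*offset=*/0, /*align=*/bytes,
                          addr, Type::i32, memoryName);
}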
diff --git a/src/passes/SpillPointers.cpp b/src/passes/SpillPointers.cpp
index 46b1fb47c..04193c2d2 100644
--- a/src/passes/SpillPointers.cpp
+++ b/src/passes/SpillPointers.cpp
@@ -74,7 +74,7 @@ struct SpillPointers
Type pointerType;
void spillPointers() {
- pointerType = getModule()->memory.indexType;
+ pointerType = getModule()->memories[0]->indexType;
// we only care about possible pointers
auto* func = getFunction();
@@ -192,7 +192,8 @@ struct SpillPointers
pointerType.getByteSize(),
builder.makeLocalGet(spillLocal, pointerType),
builder.makeLocalGet(index, pointerType),
- pointerType));
+ pointerType,
+ getModule()->memories[0]->name));
}
// add the (modified) call
block->list.push_back(call);
diff --git a/src/passes/StackCheck.cpp b/src/passes/StackCheck.cpp
index 4b1054597..fd7ed6c0f 100644
--- a/src/passes/StackCheck.cpp
+++ b/src/passes/StackCheck.cpp
@@ -146,15 +146,17 @@ struct StackCheck : public Pass {
Builder builder(*module);
// Add the globals.
+ Type indexType =
+ module->memories.empty() ? Type::i32 : module->memories[0]->indexType;
auto stackBase =
module->addGlobal(builder.makeGlobal(stackBaseName,
stackPointer->type,
- builder.makeConstPtr(0),
+ builder.makeConstPtr(0, indexType),
Builder::Mutable));
auto stackLimit =
module->addGlobal(builder.makeGlobal(stackLimitName,
stackPointer->type,
- builder.makeConstPtr(0),
+ builder.makeConstPtr(0, indexType),
Builder::Mutable));
// Instrument all the code.
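
Editorial note: StackCheck also shows the fallback passes use when they only need a pointer-sized constant — if the module has no memory at all, the index type defaults to i32. A minimal sketch of that guard (the helper name is illustrative):

#include "wasm.h"
#include "wasm-builder.h"

using namespace wasm;

// Build a zero pointer constant that works whether or not the module
// defines any memory, defaulting the index type to i32.
static Expression* makeZeroPtr(Module& module) {
  Builder builder(module);
  Type indexType =
    module.memories.empty() ? Type::i32 : module.memories[0]->indexType;
  return builder.makeConstPtr(0, indexType);
}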
diff --git a/src/shell-interface.h b/src/shell-interface.h
index d1cf3290e..98349055b 100644
--- a/src/shell-interface.h
+++ b/src/shell-interface.h
@@ -57,8 +57,6 @@ struct ShellExternalInterface : ModuleRunner::ExternalInterface {
static_assert(!(sizeof(T) & (sizeof(T) - 1)), "must be a power of 2");
return 0 == (reinterpret_cast<uintptr_t>(address) & (sizeof(T) - 1));
}
- Memory(Memory&) = delete;
- Memory& operator=(const Memory&) = delete;
public:
Memory() = default;
@@ -92,14 +90,14 @@ struct ShellExternalInterface : ModuleRunner::ExternalInterface {
return loaded;
}
}
- } memory;
+ };
+ std::map<Name, Memory> memories;
std::unordered_map<Name, std::vector<Literal>> tables;
std::map<Name, std::shared_ptr<ModuleRunner>> linkedInstances;
ShellExternalInterface(
- std::map<Name, std::shared_ptr<ModuleRunner>> linkedInstances_ = {})
- : memory() {
+ std::map<Name, std::shared_ptr<ModuleRunner>> linkedInstances_ = {}) {
linkedInstances.swap(linkedInstances_);
}
virtual ~ShellExternalInterface() = default;
@@ -114,9 +112,11 @@ struct ShellExternalInterface : ModuleRunner::ExternalInterface {
}
void init(Module& wasm, ModuleRunner& instance) override {
- if (wasm.memory.exists && !wasm.memory.imported()) {
- memory.resize(wasm.memory.initial * wasm::Memory::kPageSize);
- }
+ ModuleUtils::iterDefinedMemories(wasm, [&](wasm::Memory* memory) {
+ auto shellMemory = Memory();
+ shellMemory.resize(memory->initial * wasm::Memory::kPageSize);
+ memories[memory->name] = shellMemory;
+ });
ModuleUtils::iterDefinedTables(
wasm, [&](Table* table) { tables[table->name].resize(table->initial); });
}
@@ -195,31 +195,119 @@ struct ShellExternalInterface : ModuleRunner::ExternalInterface {
}
}
- int8_t load8s(Address addr) override { return memory.get<int8_t>(addr); }
- uint8_t load8u(Address addr) override { return memory.get<uint8_t>(addr); }
- int16_t load16s(Address addr) override { return memory.get<int16_t>(addr); }
- uint16_t load16u(Address addr) override { return memory.get<uint16_t>(addr); }
- int32_t load32s(Address addr) override { return memory.get<int32_t>(addr); }
- uint32_t load32u(Address addr) override { return memory.get<uint32_t>(addr); }
- int64_t load64s(Address addr) override { return memory.get<int64_t>(addr); }
- uint64_t load64u(Address addr) override { return memory.get<uint64_t>(addr); }
- std::array<uint8_t, 16> load128(Address addr) override {
+ int8_t load8s(Address addr, Name memoryName) override {
+ auto it = memories.find(memoryName);
+ if (it == memories.end()) {
+ trap("load8s on non-existing memory");
+ }
+ auto& memory = it->second;
+ return memory.get<int8_t>(addr);
+ }
+ uint8_t load8u(Address addr, Name memoryName) override {
+ auto it = memories.find(memoryName);
+ if (it == memories.end()) {
+ trap("load8u on non-existing memory");
+ }
+ auto& memory = it->second;
+ return memory.get<uint8_t>(addr);
+ }
+ int16_t load16s(Address addr, Name memoryName) override {
+ auto it = memories.find(memoryName);
+ if (it == memories.end()) {
+ trap("load16s on non-existing memory");
+ }
+ auto& memory = it->second;
+ return memory.get<int16_t>(addr);
+ }
+ uint16_t load16u(Address addr, Name memoryName) override {
+ auto it = memories.find(memoryName);
+ if (it == memories.end()) {
+ trap("load16u on non-existing memory");
+ }
+ auto& memory = it->second;
+ return memory.get<uint16_t>(addr);
+ }
+ int32_t load32s(Address addr, Name memoryName) override {
+ auto it = memories.find(memoryName);
+ if (it == memories.end()) {
+ trap("load32s on non-existing memory");
+ }
+ auto& memory = it->second;
+ return memory.get<int32_t>(addr);
+ }
+ uint32_t load32u(Address addr, Name memoryName) override {
+ auto it = memories.find(memoryName);
+ if (it == memories.end()) {
+ trap("load32u on non-existing memory");
+ }
+ auto& memory = it->second;
+ return memory.get<uint32_t>(addr);
+ }
+ int64_t load64s(Address addr, Name memoryName) override {
+ auto it = memories.find(memoryName);
+ if (it == memories.end()) {
+ trap("load64s on non-existing memory");
+ }
+ auto& memory = it->second;
+ return memory.get<int64_t>(addr);
+ }
+ uint64_t load64u(Address addr, Name memoryName) override {
+ auto it = memories.find(memoryName);
+ if (it == memories.end()) {
+ trap("load64u on non-existing memory");
+ }
+ auto& memory = it->second;
+ return memory.get<uint64_t>(addr);
+ }
+ std::array<uint8_t, 16> load128(Address addr, Name memoryName) override {
+ auto it = memories.find(memoryName);
+ if (it == memories.end()) {
+ trap("load128 on non-existing memory");
+ }
+ auto& memory = it->second;
return memory.get<std::array<uint8_t, 16>>(addr);
}
- void store8(Address addr, int8_t value) override {
+ void store8(Address addr, int8_t value, Name memoryName) override {
+ auto it = memories.find(memoryName);
+ if (it == memories.end()) {
+ trap("store8 on non-existing memory");
+ }
+ auto& memory = it->second;
memory.set<int8_t>(addr, value);
}
- void store16(Address addr, int16_t value) override {
+ void store16(Address addr, int16_t value, Name memoryName) override {
+ auto it = memories.find(memoryName);
+ if (it == memories.end()) {
+ trap("store16 on non-existing memory");
+ }
+ auto& memory = it->second;
memory.set<int16_t>(addr, value);
}
- void store32(Address addr, int32_t value) override {
+ void store32(Address addr, int32_t value, Name memoryName) override {
+ auto it = memories.find(memoryName);
+ if (it == memories.end()) {
+ trap("store32 on non-existing memory");
+ }
+ auto& memory = it->second;
memory.set<int32_t>(addr, value);
}
- void store64(Address addr, int64_t value) override {
+ void store64(Address addr, int64_t value, Name memoryName) override {
+ auto it = memories.find(memoryName);
+ if (it == memories.end()) {
+ trap("store64 on non-existing memory");
+ }
+ auto& memory = it->second;
memory.set<int64_t>(addr, value);
}
- void store128(Address addr, const std::array<uint8_t, 16>& value) override {
+ void store128(Address addr,
+ const std::array<uint8_t, 16>& value,
+ Name memoryName) override {
+ auto it = memories.find(memoryName);
+ if (it == memories.end()) {
+ trap("store128 on non-existing memory");
+ }
+ auto& memory = it->second;
memory.set<std::array<uint8_t, 16>>(addr, value);
}
@@ -250,12 +338,18 @@ struct ShellExternalInterface : ModuleRunner::ExternalInterface {
return table[index];
}
- bool growMemory(Address /*oldSize*/, Address newSize) override {
+ bool
+ growMemory(Name memoryName, Address /*oldSize*/, Address newSize) override {
// Apply a reasonable limit on memory size, 1GB, to avoid DOS on the
// interpreter.
if (newSize > 1024 * 1024 * 1024) {
return false;
}
+ auto it = memories.find(memoryName);
+ if (it == memories.end()) {
+ trap("growMemory on non-existing memory");
+ }
+ auto& memory = it->second;
memory.resize(newSize);
return true;
}
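
Editorial note: with the shell interface's single `memory` member replaced by a name-keyed map, callers must say which memory they are touching, and unknown names trap. A small usage sketch, assuming an interface already initialized via init() for a module whose first memory is i32-indexed and at least one page large; the function name pokeMemory is illustrative:

#include "shell-interface.h"

using namespace wasm;

// Write and read back a value through the name-keyed accessors.
static int32_t pokeMemory(ShellExternalInterface& iface, Module& wasm) {
  Name mem = wasm.memories[0]->name;
  iface.store32(/*addr=*/16, /*value=*/42, mem); // traps if `mem` is unknown
  return iface.load32s(/*addr=*/16, mem);        // 42
}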
diff --git a/src/tools/fuzzing/fuzzing.cpp b/src/tools/fuzzing/fuzzing.cpp
index 4d0a8d7b7..cdefe29f1 100644
--- a/src/tools/fuzzing/fuzzing.cpp
+++ b/src/tools/fuzzing/fuzzing.cpp
@@ -185,7 +185,7 @@ void TranslateToFuzzReader::build() {
void TranslateToFuzzReader::setupMemory() {
// Add memory itself
- MemoryUtils::ensureExists(wasm.memory);
+ MemoryUtils::ensureExists(&wasm);
if (wasm.features.hasBulkMemory()) {
size_t memCovered = 0;
// need at least one segment for memory.inits
@@ -202,12 +202,14 @@ void TranslateToFuzzReader::setupMemory() {
if (!segment->isPassive) {
segment->offset = builder.makeConst(int32_t(memCovered));
memCovered += segSize;
+ segment->memory = wasm.memories[0]->name;
}
wasm.dataSegments.push_back(std::move(segment));
}
} else {
// init some data
auto segment = builder.makeDataSegment();
+ segment->memory = wasm.memories[0]->name;
segment->offset = builder.makeConst(int32_t(0));
segment->setName(Name::fromInt(0), false);
wasm.dataSegments.push_back(std::move(segment));
@@ -229,7 +231,7 @@ void TranslateToFuzzReader::setupMemory() {
std::vector<Expression*> contents;
contents.push_back(
builder.makeLocalSet(0, builder.makeConst(uint32_t(5381))));
- auto zero = Literal::makeFromInt32(0, wasm.memory.indexType);
+ auto zero = Literal::makeFromInt32(0, wasm.memories[0]->indexType);
for (Index i = 0; i < USABLE_MEMORY; i++) {
contents.push_back(builder.makeLocalSet(
0,
@@ -241,7 +243,13 @@ void TranslateToFuzzReader::setupMemory() {
builder.makeLocalGet(0, Type::i32),
builder.makeConst(uint32_t(5))),
builder.makeLocalGet(0, Type::i32)),
- builder.makeLoad(1, false, i, 1, builder.makeConst(zero), Type::i32))));
+ builder.makeLoad(1,
+ false,
+ i,
+ 1,
+ builder.makeConst(zero),
+ Type::i32,
+ wasm.memories[0]->name))));
}
contents.push_back(builder.makeLocalGet(0, Type::i32));
auto* body = builder.makeBlock(contents);
@@ -251,7 +259,8 @@ void TranslateToFuzzReader::setupMemory() {
builder.makeExport(hasher->name, hasher->name, ExternalKind::Function));
// Export memory so JS fuzzing can use it
if (!wasm.getExportOrNull("memory")) {
- wasm.addExport(builder.makeExport("memory", "0", ExternalKind::Memory));
+ wasm.addExport(builder.makeExport(
+ "memory", wasm.memories[0]->name, ExternalKind::Memory));
}
}
@@ -354,25 +363,26 @@ void TranslateToFuzzReader::finalizeMemory() {
maxOffset = maxOffset + offset->value.getInteger();
}
}
- wasm.memory.initial = std::max(
- wasm.memory.initial,
+ wasm.memories[0]->initial = std::max(
+ wasm.memories[0]->initial,
Address((maxOffset + Memory::kPageSize - 1) / Memory::kPageSize));
}
- wasm.memory.initial = std::max(wasm.memory.initial, USABLE_MEMORY);
+ wasm.memories[0]->initial =
+ std::max(wasm.memories[0]->initial, USABLE_MEMORY);
// Avoid an unlimited memory size, which would make fuzzing very difficult
// as different VMs will run out of system memory in different ways.
- if (wasm.memory.max == Memory::kUnlimitedSize) {
- wasm.memory.max = wasm.memory.initial;
+ if (wasm.memories[0]->max == Memory::kUnlimitedSize) {
+ wasm.memories[0]->max = wasm.memories[0]->initial;
}
- if (wasm.memory.max <= wasm.memory.initial) {
+ if (wasm.memories[0]->max <= wasm.memories[0]->initial) {
// To allow growth to work (which a testcase may assume), try to make the
// maximum larger than the initial.
// TODO: scan the wasm for grow instructions?
- wasm.memory.max =
- std::min(Address(wasm.memory.initial + 1), Address(Memory::kMaxSize32));
+ wasm.memories[0]->max = std::min(Address(wasm.memories[0]->initial + 1),
+ Address(Memory::kMaxSize32));
}
// Avoid an imported memory (which the fuzz harness would need to handle).
- wasm.memory.module = wasm.memory.base = Name();
+ wasm.memories[0]->module = wasm.memories[0]->base = Name();
}
void TranslateToFuzzReader::finalizeTable() {
@@ -1403,12 +1413,12 @@ Expression* TranslateToFuzzReader::makeTupleExtract(Type type) {
}
Expression* TranslateToFuzzReader::makePointer() {
- auto* ret = make(wasm.memory.indexType);
+ auto* ret = make(wasm.memories[0]->indexType);
// with high probability, mask the pointer so it's in a reasonable
// range. otherwise, most pointers are going to be out of range and
// most memory ops will just trap
if (!allowOOB || !oneIn(10)) {
- if (wasm.memory.is64()) {
+ if (wasm.memories[0]->is64()) {
ret = builder.makeBinary(
AndInt64, ret, builder.makeConst(int64_t(USABLE_MEMORY - 1)));
} else {
@@ -1427,11 +1437,19 @@ Expression* TranslateToFuzzReader::makeNonAtomicLoad(Type type) {
bool signed_ = get() & 1;
switch (upTo(3)) {
case 0:
- return builder.makeLoad(1, signed_, offset, 1, ptr, type);
+ return builder.makeLoad(
+ 1, signed_, offset, 1, ptr, type, wasm.memories[0]->name);
case 1:
- return builder.makeLoad(2, signed_, offset, pick(1, 2), ptr, type);
+ return builder.makeLoad(
+ 2, signed_, offset, pick(1, 2), ptr, type, wasm.memories[0]->name);
case 2:
- return builder.makeLoad(4, signed_, offset, pick(1, 2, 4), ptr, type);
+ return builder.makeLoad(4,
+ signed_,
+ offset,
+ pick(1, 2, 4),
+ ptr,
+ type,
+ wasm.memories[0]->name);
}
WASM_UNREACHABLE("unexpected value");
}
@@ -1439,29 +1457,49 @@ Expression* TranslateToFuzzReader::makeNonAtomicLoad(Type type) {
bool signed_ = get() & 1;
switch (upTo(4)) {
case 0:
- return builder.makeLoad(1, signed_, offset, 1, ptr, type);
+ return builder.makeLoad(
+ 1, signed_, offset, 1, ptr, type, wasm.memories[0]->name);
case 1:
- return builder.makeLoad(2, signed_, offset, pick(1, 2), ptr, type);
+ return builder.makeLoad(
+ 2, signed_, offset, pick(1, 2), ptr, type, wasm.memories[0]->name);
case 2:
- return builder.makeLoad(4, signed_, offset, pick(1, 2, 4), ptr, type);
+ return builder.makeLoad(4,
+ signed_,
+ offset,
+ pick(1, 2, 4),
+ ptr,
+ type,
+ wasm.memories[0]->name);
case 3:
- return builder.makeLoad(
- 8, signed_, offset, pick(1, 2, 4, 8), ptr, type);
+ return builder.makeLoad(8,
+ signed_,
+ offset,
+ pick(1, 2, 4, 8),
+ ptr,
+ type,
+ wasm.memories[0]->name);
}
WASM_UNREACHABLE("unexpected value");
}
case Type::f32: {
- return builder.makeLoad(4, false, offset, pick(1, 2, 4), ptr, type);
+ return builder.makeLoad(
+ 4, false, offset, pick(1, 2, 4), ptr, type, wasm.memories[0]->name);
}
case Type::f64: {
- return builder.makeLoad(8, false, offset, pick(1, 2, 4, 8), ptr, type);
+ return builder.makeLoad(
+ 8, false, offset, pick(1, 2, 4, 8), ptr, type, wasm.memories[0]->name);
}
case Type::v128: {
if (!wasm.features.hasSIMD()) {
return makeTrivial(type);
}
- return builder.makeLoad(
- 16, false, offset, pick(1, 2, 4, 8, 16), ptr, type);
+ return builder.makeLoad(16,
+ false,
+ offset,
+ pick(1, 2, 4, 8, 16),
+ ptr,
+ type,
+ wasm.memories[0]->name);
}
case Type::none:
case Type::unreachable:
@@ -1484,7 +1522,7 @@ Expression* TranslateToFuzzReader::makeLoad(Type type) {
}
// make it atomic
auto* load = ret->cast<Load>();
- wasm.memory.shared = true;
+ wasm.memories[0]->shared = true;
load->isAtomic = true;
load->signed_ = false;
load->align = load->bytes;
@@ -1511,6 +1549,7 @@ Expression* TranslateToFuzzReader::makeNonAtomicStore(Type type) {
store->value = make(Type::unreachable);
break;
}
+ store->memory = wasm.memories[0]->name;
store->finalize();
return store;
}
@@ -1526,40 +1565,58 @@ Expression* TranslateToFuzzReader::makeNonAtomicStore(Type type) {
case Type::i32: {
switch (upTo(3)) {
case 0:
- return builder.makeStore(1, offset, 1, ptr, value, type);
+ return builder.makeStore(
+ 1, offset, 1, ptr, value, type, wasm.memories[0]->name);
case 1:
- return builder.makeStore(2, offset, pick(1, 2), ptr, value, type);
+ return builder.makeStore(
+ 2, offset, pick(1, 2), ptr, value, type, wasm.memories[0]->name);
case 2:
- return builder.makeStore(4, offset, pick(1, 2, 4), ptr, value, type);
+ return builder.makeStore(
+ 4, offset, pick(1, 2, 4), ptr, value, type, wasm.memories[0]->name);
}
WASM_UNREACHABLE("invalid value");
}
case Type::i64: {
switch (upTo(4)) {
case 0:
- return builder.makeStore(1, offset, 1, ptr, value, type);
+ return builder.makeStore(
+ 1, offset, 1, ptr, value, type, wasm.memories[0]->name);
case 1:
- return builder.makeStore(2, offset, pick(1, 2), ptr, value, type);
+ return builder.makeStore(
+ 2, offset, pick(1, 2), ptr, value, type, wasm.memories[0]->name);
case 2:
- return builder.makeStore(4, offset, pick(1, 2, 4), ptr, value, type);
- case 3:
return builder.makeStore(
- 8, offset, pick(1, 2, 4, 8), ptr, value, type);
+ 4, offset, pick(1, 2, 4), ptr, value, type, wasm.memories[0]->name);
+ case 3:
+ return builder.makeStore(8,
+ offset,
+ pick(1, 2, 4, 8),
+ ptr,
+ value,
+ type,
+ wasm.memories[0]->name);
}
WASM_UNREACHABLE("invalid value");
}
case Type::f32: {
- return builder.makeStore(4, offset, pick(1, 2, 4), ptr, value, type);
+ return builder.makeStore(
+ 4, offset, pick(1, 2, 4), ptr, value, type, wasm.memories[0]->name);
}
case Type::f64: {
- return builder.makeStore(8, offset, pick(1, 2, 4, 8), ptr, value, type);
+ return builder.makeStore(
+ 8, offset, pick(1, 2, 4, 8), ptr, value, type, wasm.memories[0]->name);
}
case Type::v128: {
if (!wasm.features.hasSIMD()) {
return makeTrivial(type);
}
- return builder.makeStore(
- 16, offset, pick(1, 2, 4, 8, 16), ptr, value, type);
+ return builder.makeStore(16,
+ offset,
+ pick(1, 2, 4, 8, 16),
+ ptr,
+ value,
+ type,
+ wasm.memories[0]->name);
}
case Type::none:
case Type::unreachable:
@@ -1584,7 +1641,7 @@ Expression* TranslateToFuzzReader::makeStore(Type type) {
return store;
}
// make it atomic
- wasm.memory.shared = true;
+ wasm.memories[0]->shared = true;
store->isAtomic = true;
store->align = store->bytes;
return store;
@@ -2530,7 +2587,7 @@ Expression* TranslateToFuzzReader::makeAtomic(Type type) {
if (!allowMemory) {
return makeTrivial(type);
}
- wasm.memory.shared = true;
+ wasm.memories[0]->shared = true;
if (type == Type::none) {
return builder.makeAtomicFence();
}
@@ -2540,12 +2597,17 @@ Expression* TranslateToFuzzReader::makeAtomic(Type type) {
auto expectedType = pick(Type::i32, Type::i64);
auto* expected = make(expectedType);
auto* timeout = make(Type::i64);
- return builder.makeAtomicWait(
- ptr, expected, timeout, expectedType, logify(get()));
+ return builder.makeAtomicWait(ptr,
+ expected,
+ timeout,
+ expectedType,
+ logify(get()),
+ wasm.memories[0]->name);
} else {
auto* ptr = makePointer();
auto* count = make(Type::i32);
- return builder.makeAtomicNotify(ptr, count, logify(get()));
+ return builder.makeAtomicNotify(
+ ptr, count, logify(get()), wasm.memories[0]->name);
}
}
Index bytes;
@@ -2598,12 +2660,13 @@ Expression* TranslateToFuzzReader::makeAtomic(Type type) {
offset,
ptr,
value,
- type);
+ type,
+ wasm.memories[0]->name);
} else {
auto* expected = make(type);
auto* replacement = make(type);
return builder.makeAtomicCmpxchg(
- bytes, offset, ptr, expected, replacement, type);
+ bytes, offset, ptr, expected, replacement, type, wasm.memories[0]->name);
}
}
@@ -2804,7 +2867,7 @@ Expression* TranslateToFuzzReader::makeSIMDLoad() {
WASM_UNREACHABLE("Unexpected SIMD loads");
}
Expression* ptr = makePointer();
- return builder.makeSIMDLoad(op, offset, align, ptr);
+ return builder.makeSIMDLoad(op, offset, align, ptr, wasm.memories[0]->name);
}
Expression* TranslateToFuzzReader::makeBulkMemory(Type type) {
@@ -2868,7 +2931,8 @@ Expression* TranslateToFuzzReader::makeMemoryInit() {
Expression* dest = makePointer();
Expression* offset = builder.makeConst(int32_t(offsetVal));
Expression* size = builder.makeConst(int32_t(sizeVal));
- return builder.makeMemoryInit(segment, dest, offset, size);
+ return builder.makeMemoryInit(
+ segment, dest, offset, size, wasm.memories[0]->name);
}
Expression* TranslateToFuzzReader::makeDataDrop() {
@@ -2884,8 +2948,9 @@ Expression* TranslateToFuzzReader::makeMemoryCopy() {
}
Expression* dest = makePointer();
Expression* source = makePointer();
- Expression* size = make(wasm.memory.indexType);
- return builder.makeMemoryCopy(dest, source, size);
+ Expression* size = make(wasm.memories[0]->indexType);
+ return builder.makeMemoryCopy(
+ dest, source, size, wasm.memories[0]->name, wasm.memories[0]->name);
}
Expression* TranslateToFuzzReader::makeMemoryFill() {
@@ -2894,8 +2959,8 @@ Expression* TranslateToFuzzReader::makeMemoryFill() {
}
Expression* dest = makePointer();
Expression* value = make(Type::i32);
- Expression* size = make(wasm.memory.indexType);
- return builder.makeMemoryFill(dest, value, size);
+ Expression* size = make(wasm.memories[0]->indexType);
+ return builder.makeMemoryFill(dest, value, size, wasm.memories[0]->name);
}
Type TranslateToFuzzReader::getSingleConcreteType() {
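
Editorial note: every load and store the fuzzer emits now carries the target memory's name as the final Builder argument. A minimal sketch of that call shape outside the fuzzer, assuming the module already has an i32-indexed memory; storeThenLoad, the constants, and the use of local 0 as the load pointer are illustrative:

#include <vector>

#include "wasm.h"
#include "wasm-builder.h"

using namespace wasm;

// Store a constant and load it back, naming the memory on both operations.
static Expression* storeThenLoad(Module& wasm, Expression* ptr) {
  Builder builder(wasm);
  Name mem = wasm.memories[0]->name;
  auto* store = builder.makeStore(4, /*offset=*/0, /*align=*/4, ptr,
                                  builder.makeConst(int32_t(7)), Type::i32, mem);
  // assumes local 0 holds an i32 address into the same memory
  auto* load = builder.makeLoad(4, /*signed_=*/false, /*offset=*/0, /*align=*/4,
                                builder.makeLocalGet(0, Type::i32),
                                Type::i32, mem);
  std::vector<Expression*> list = {store, load};
  return builder.makeBlock(list);
}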
diff --git a/src/tools/wasm-ctor-eval.cpp b/src/tools/wasm-ctor-eval.cpp
index 6150441cd..91128471c 100644
--- a/src/tools/wasm-ctor-eval.cpp
+++ b/src/tools/wasm-ctor-eval.cpp
@@ -122,14 +122,11 @@ std::unique_ptr<Module> buildEnvModule(Module& wasm) {
// create an exported memory with the same initial and max size
ModuleUtils::iterImportedMemories(wasm, [&](Memory* memory) {
if (memory->module == env->name) {
- env->memory.name = wasm.memory.name;
- env->memory.exists = true;
- env->memory.initial = memory->initial;
- env->memory.max = memory->max;
- env->memory.shared = memory->shared;
- env->memory.indexType = memory->indexType;
+ auto* copied = ModuleUtils::copyMemory(memory, *env);
+ copied->module = Name();
+ copied->base = Name();
env->addExport(Builder(*env).makeExport(
- wasm.memory.base, wasm.memory.name, ExternalKind::Memory));
+ memory->base, copied->name, ExternalKind::Memory));
}
});
@@ -147,7 +144,7 @@ struct CtorEvalExternalInterface : EvallingModuleRunner::ExternalInterface {
std::map<Name, std::shared_ptr<EvallingModuleRunner>> linkedInstances;
// A representation of the contents of wasm memory as we execute.
- std::vector<char> memory;
+ std::unordered_map<Name, std::vector<char>> memories;
CtorEvalExternalInterface(
std::map<Name, std::shared_ptr<EvallingModuleRunner>> linkedInstances_ =
@@ -160,8 +157,8 @@ struct CtorEvalExternalInterface : EvallingModuleRunner::ExternalInterface {
void applyToModule() {
clearApplyState();
- // If nothing was ever written to memory then there is nothing to update.
- if (!memory.empty()) {
+ // If nothing was ever written to memories then there is nothing to update.
+ if (!memories.empty()) {
applyMemoryToModule();
}
@@ -171,6 +168,12 @@ struct CtorEvalExternalInterface : EvallingModuleRunner::ExternalInterface {
void init(Module& wasm_, EvallingModuleRunner& instance_) override {
wasm = &wasm_;
instance = &instance_;
+ for (auto& memory : wasm->memories) {
+ if (!memory->imported()) {
+ std::vector<char> data;
+ memories[memory->name] = data;
+ }
+ }
}
void importGlobals(GlobalValueSet& globals, Module& wasm_) override {
@@ -204,7 +207,7 @@ struct CtorEvalExternalInterface : EvallingModuleRunner::ExternalInterface {
}
// Write out a count of i32(0) and return __WASI_ERRNO_SUCCESS (0).
- store32(arguments[0].geti32(), 0);
+ store32(arguments[0].geti32(), 0, wasm->memories[0]->name);
return {Literal(int32_t(0))};
}
@@ -225,7 +228,7 @@ struct CtorEvalExternalInterface : EvallingModuleRunner::ExternalInterface {
}
// Write out an argc of i32(0) and return a __WASI_ERRNO_SUCCESS (0).
- store32(arguments[0].geti32(), 0);
+ store32(arguments[0].geti32(), 0, wasm->memories[0]->name);
return {Literal(int32_t(0))};
}
@@ -336,29 +339,47 @@ struct CtorEvalExternalInterface : EvallingModuleRunner::ExternalInterface {
// called during initialization
void tableStore(Name tableName, Index index, const Literal& value) override {}
- int8_t load8s(Address addr) override { return doLoad<int8_t>(addr); }
- uint8_t load8u(Address addr) override { return doLoad<uint8_t>(addr); }
- int16_t load16s(Address addr) override { return doLoad<int16_t>(addr); }
- uint16_t load16u(Address addr) override { return doLoad<uint16_t>(addr); }
- int32_t load32s(Address addr) override { return doLoad<int32_t>(addr); }
- uint32_t load32u(Address addr) override { return doLoad<uint32_t>(addr); }
- int64_t load64s(Address addr) override { return doLoad<int64_t>(addr); }
- uint64_t load64u(Address addr) override { return doLoad<uint64_t>(addr); }
+ int8_t load8s(Address addr, Name memoryName) override {
+ return doLoad<int8_t>(addr, memoryName);
+ }
+ uint8_t load8u(Address addr, Name memoryName) override {
+ return doLoad<uint8_t>(addr, memoryName);
+ }
+ int16_t load16s(Address addr, Name memoryName) override {
+ return doLoad<int16_t>(addr, memoryName);
+ }
+ uint16_t load16u(Address addr, Name memoryName) override {
+ return doLoad<uint16_t>(addr, memoryName);
+ }
+ int32_t load32s(Address addr, Name memoryName) override {
+ return doLoad<int32_t>(addr, memoryName);
+ }
+ uint32_t load32u(Address addr, Name memoryName) override {
+ return doLoad<uint32_t>(addr, memoryName);
+ }
+ int64_t load64s(Address addr, Name memoryName) override {
+ return doLoad<int64_t>(addr, memoryName);
+ }
+ uint64_t load64u(Address addr, Name memoryName) override {
+ return doLoad<uint64_t>(addr, memoryName);
+ }
- void store8(Address addr, int8_t value) override {
- doStore<int8_t>(addr, value);
+ void store8(Address addr, int8_t value, Name memoryName) override {
+ doStore<int8_t>(addr, value, memoryName);
}
- void store16(Address addr, int16_t value) override {
- doStore<int16_t>(addr, value);
+ void store16(Address addr, int16_t value, Name memoryName) override {
+ doStore<int16_t>(addr, value, memoryName);
}
- void store32(Address addr, int32_t value) override {
- doStore<int32_t>(addr, value);
+ void store32(Address addr, int32_t value, Name memoryName) override {
+ doStore<int32_t>(addr, value, memoryName);
}
- void store64(Address addr, int64_t value) override {
- doStore<int64_t>(addr, value);
+ void store64(Address addr, int64_t value, Name memoryName) override {
+ doStore<int64_t>(addr, value, memoryName);
}
- bool growMemory(Address /*oldSize*/, Address /*newSize*/) override {
+ bool growMemory(Name memoryName,
+ Address /*oldSize*/,
+ Address /*newSize*/) override {
throw FailToEvalException("grow memory");
}
@@ -385,8 +406,12 @@ struct CtorEvalExternalInterface : EvallingModuleRunner::ExternalInterface {
private:
// TODO: handle unaligned too, see shell-interface
-
- template<typename T> T* getMemory(Address address) {
+ template<typename T> T* getMemory(Address address, Name memoryName) {
+ auto it = memories.find(memoryName);
+ if (it == memories.end()) {
+ Fatal() << "memory not found: " << memoryName;
+ }
+ auto& memory = it->second;
// resize the memory buffer as needed.
auto max = address + sizeof(T);
if (max > memory.size()) {
@@ -395,15 +420,15 @@ private:
return (T*)(&memory[address]);
}
- template<typename T> void doStore(Address address, T value) {
+ template<typename T> void doStore(Address address, T value, Name memoryName) {
// do a memcpy to avoid undefined behavior if unaligned
- memcpy(getMemory<T>(address), &value, sizeof(T));
+ memcpy(getMemory<T>(address, memoryName), &value, sizeof(T));
}
- template<typename T> T doLoad(Address address) {
+ template<typename T> T doLoad(Address address, Name memoryName) {
// do a memcpy to avoid undefined behavior if unaligned
T ret;
- memcpy(&ret, getMemory<T>(address), sizeof(T));
+ memcpy(&ret, getMemory<T>(address, memoryName), sizeof(T));
return ret;
}
@@ -431,14 +456,15 @@ private:
auto curr = builder.makeDataSegment();
curr->offset = builder.makeConst(int32_t(0));
curr->setName(Name::fromInt(0), false);
- wasm->dataSegments.push_back(std::move(curr));
+ curr->memory = wasm->memories[0]->name;
+ wasm->addDataSegment(std::move(curr));
}
auto& segment = wasm->dataSegments[0];
assert(segment->offset->cast<Const>()->value.getInteger() == 0);
// Copy the current memory contents after execution into the Module's
// memory.
- segment->data = memory;
+ segment->data = memories[wasm->memories[0]->name];
}
// Serializing GC data requires more work than linear memory, because
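
Editorial note: wasm-ctor-eval now keeps one growable byte buffer per memory name rather than a single vector. A distilled sketch of that keyed-buffer pattern with hypothetical free functions; unlike the real interface, which fails hard on an unknown name, this sketch creates buffers lazily:

#include <cstring>
#include <unordered_map>
#include <vector>

#include "wasm.h"

using namespace wasm;

// One growable byte vector per memory name.
static std::unordered_map<Name, std::vector<char>> memoryBuffers;

// Grow the named buffer as needed and return a pointer into it.
template<typename T> static T* memoryPtr(Name memoryName, Address address) {
  auto& memory = memoryBuffers[memoryName];
  auto max = address + sizeof(T);
  if (max > memory.size()) {
    memory.resize(max);
  }
  return (T*)(&memory[address]);
}

// memcpy in and out to avoid undefined behavior on unaligned addresses.
template<typename T> static void doStore(Name memoryName, Address address, T value) {
  memcpy(memoryPtr<T>(memoryName, address), &value, sizeof(T));
}

template<typename T> static T doLoad(Name memoryName, Address address) {
  T ret;
  memcpy(&ret, memoryPtr<T>(memoryName, address), sizeof(T));
  return ret;
}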
diff --git a/src/tools/wasm-shell.cpp b/src/tools/wasm-shell.cpp
index 51eafb652..4b20e922b 100644
--- a/src/tools/wasm-shell.cpp
+++ b/src/tools/wasm-shell.cpp
@@ -284,9 +284,7 @@ protected:
}
}
});
- if (wasm.memory.imported()) {
- reportUnknownImport(&wasm.memory);
- }
+ ModuleUtils::iterImportedMemories(wasm, reportUnknownImport);
}
if (!invalid && id == ASSERT_TRAP) {
@@ -352,11 +350,10 @@ protected:
spectest->addExport(
builder.makeExport("table", Name::fromInt(0), ExternalKind::Table));
- spectest->memory.exists = true;
- spectest->memory.initial = 1;
- spectest->memory.max = 2;
- spectest->addExport(builder.makeExport(
- "memory", spectest->memory.name, ExternalKind::Memory));
+ Memory* memory =
+ spectest->addMemory(builder.makeMemory(Name::fromInt(0), 1, 2));
+ spectest->addExport(
+ builder.makeExport("memory", memory->name, ExternalKind::Memory));
modules["spectest"].swap(spectest);
modules["spectest"]->features = FeatureSet::All;
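
Editorial note: the spectest module is now given a real Memory object instead of toggling the old `memory.exists` flag. A minimal sketch of creating and exporting a memory with the updated helpers; the function name is illustrative:

#include "wasm.h"
#include "wasm-builder.h"

using namespace wasm;

// Define a memory of 1..2 pages named "0" and export it as "memory".
static void addExportedMemory(Module& module) {
  Builder builder(module);
  Memory* memory = module.addMemory(
    Builder::makeMemory(Name::fromInt(0), /*initial=*/1, /*max=*/2));
  module.addExport(
    builder.makeExport("memory", memory->name, ExternalKind::Memory));
}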
diff --git a/src/tools/wasm-split/instrumenter.cpp b/src/tools/wasm-split/instrumenter.cpp
index c23a70f06..50da9d034 100644
--- a/src/tools/wasm-split/instrumenter.cpp
+++ b/src/tools/wasm-split/instrumenter.cpp
@@ -108,13 +108,15 @@ void Instrumenter::instrumentFuncs() {
}
// (i32.atomic.store8 offset=funcidx (i32.const 0) (i32.const 1))
Index funcIdx = 0;
+ assert(!wasm->memories.empty());
ModuleUtils::iterDefinedFunctions(*wasm, [&](Function* func) {
func->body = builder.makeSequence(
builder.makeAtomicStore(1,
funcIdx,
- builder.makeConstPtr(0),
+ builder.makeConstPtr(0, Type::i32),
builder.makeConst(uint32_t(1)),
- Type::i32),
+ Type::i32,
+ wasm->memories[0]->name),
func->body,
func->body->type);
++funcIdx;
@@ -168,9 +170,22 @@ void Instrumenter::addProfileExport() {
return builder.makeConst(int32_t(profileSize));
};
+ // Also make sure there is a memory with enough pages to write into
+ size_t pages = (profileSize + Memory::kPageSize - 1) / Memory::kPageSize;
+ if (wasm->memories.empty()) {
+ wasm->addMemory(Builder::makeMemory("0"));
+ wasm->memories[0]->initial = pages;
+ wasm->memories[0]->max = pages;
+ } else if (wasm->memories[0]->initial < pages) {
+ wasm->memories[0]->initial = pages;
+ if (wasm->memories[0]->max < pages) {
+ wasm->memories[0]->max = pages;
+ }
+ }
+
// Write the hash followed by all the time stamps
- Expression* writeData =
- builder.makeStore(8, 0, 1, getAddr(), hashConst(), Type::i64);
+ Expression* writeData = builder.makeStore(
+ 8, 0, 1, getAddr(), hashConst(), Type::i64, wasm->memories[0]->name);
uint32_t offset = 8;
switch (options.storageKind) {
@@ -183,7 +198,8 @@ void Instrumenter::addProfileExport() {
1,
getAddr(),
builder.makeGlobalGet(global, Type::i32),
- Type::i32));
+ Type::i32,
+ wasm->memories[0]->name));
offset += 4;
}
break;
@@ -232,8 +248,10 @@ void Instrumenter::addProfileExport() {
getAddr(),
builder.makeBinary(
MulInt32, getFuncIdx(), builder.makeConst(uint32_t(4)))),
- builder.makeAtomicLoad(1, 0, getFuncIdx(), Type::i32),
- Type::i32),
+ builder.makeAtomicLoad(
+ 1, 0, getFuncIdx(), Type::i32, wasm->memories[0]->name),
+ Type::i32,
+ wasm->memories[0]->name),
builder.makeLocalSet(
funcIdxVar,
builder.makeBinary(
@@ -253,21 +271,8 @@ void Instrumenter::addProfileExport() {
wasm->addExport(
Builder::makeExport(options.profileExport, name, ExternalKind::Function));
- // Also make sure there is a memory with enough pages to write into
- size_t pages = (profileSize + Memory::kPageSize - 1) / Memory::kPageSize;
- if (!wasm->memory.exists) {
- wasm->memory.exists = true;
- wasm->memory.initial = pages;
- wasm->memory.max = pages;
- } else if (wasm->memory.initial < pages) {
- wasm->memory.initial = pages;
- if (wasm->memory.max < pages) {
- wasm->memory.max = pages;
- }
- }
-
// Export the memory if it is not already exported or imported.
- if (!wasm->memory.imported()) {
+ if (!wasm->memories[0]->imported()) {
bool memoryExported = false;
for (auto& ex : wasm->exports) {
if (ex->kind == ExternalKind::Memory) {
@@ -276,10 +281,10 @@ void Instrumenter::addProfileExport() {
}
}
if (!memoryExported) {
- wasm->addExport(
- Builder::makeExport("profile-memory",
- Names::getValidExportName(*wasm, wasm->memory.name),
- ExternalKind::Memory));
+ wasm->addExport(Builder::makeExport(
+ "profile-memory",
+ Names::getValidExportName(*wasm, wasm->memories[0]->name),
+ ExternalKind::Memory));
}
}
}
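
Editorial note: the capacity logic the instrumenter moved earlier in the function is worth spelling out — the profile needs ceil(profileSize / page size) pages, and with Memory::kPageSize at 64 KiB a 100,000-byte profile therefore needs 2 pages. A sketch of that check on its own, mirroring the hunk above (the helper name is illustrative):

#include "wasm.h"
#include "wasm-builder.h"

using namespace wasm;

// Make sure memories[0] can hold `bytes` bytes, creating a memory named "0"
// if the module has none.
static void ensureMemoryPages(Module& wasm, size_t bytes) {
  size_t pages = (bytes + Memory::kPageSize - 1) / Memory::kPageSize;
  if (wasm.memories.empty()) {
    wasm.addMemory(Builder::makeMemory("0"));
    wasm.memories[0]->initial = pages;
    wasm.memories[0]->max = pages;
  } else if (wasm.memories[0]->initial < pages) {
    wasm.memories[0]->initial = pages;
    if (wasm.memories[0]->max < pages) {
      wasm.memories[0]->max = pages;
    }
  }
}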
diff --git a/src/wasm-binary.h b/src/wasm-binary.h
index 6b05c38cc..9424f6fda 100644
--- a/src/wasm-binary.h
+++ b/src/wasm-binary.h
@@ -1187,6 +1187,7 @@ class WasmBinaryWriter {
std::unordered_map<Name, Index> globalIndexes;
std::unordered_map<Name, Index> tableIndexes;
std::unordered_map<Name, Index> elemIndexes;
+ std::unordered_map<Name, Index> memoryIndexes;
std::unordered_map<Name, Index> dataIndexes;
BinaryIndexes(Module& wasm) {
@@ -1209,6 +1210,7 @@ class WasmBinaryWriter {
addIndexes(wasm.functions, functionIndexes);
addIndexes(wasm.tags, tagIndexes);
addIndexes(wasm.tables, tableIndexes);
+ addIndexes(wasm.memories, memoryIndexes);
for (auto& curr : wasm.elementSegments) {
auto index = elemIndexes.size();
@@ -1281,7 +1283,7 @@ public:
int32_t startSubsection(BinaryConsts::UserSections::Subsection code);
void finishSubsection(int32_t start);
void writeStart();
- void writeMemory();
+ void writeMemories();
void writeTypes();
void writeImports();
@@ -1297,6 +1299,7 @@ public:
uint32_t getFunctionIndex(Name name) const;
uint32_t getTableIndex(Name name) const;
+ uint32_t getMemoryIndex(Name name) const;
uint32_t getGlobalIndex(Name name) const;
uint32_t getTagIndex(Name name) const;
uint32_t getTypeIndex(HeapType type) const;
@@ -1466,12 +1469,13 @@ public:
void verifyInt64(int64_t x);
void readHeader();
void readStart();
- void readMemory();
+ void readMemories();
void readTypes();
// gets a name in the combined import+defined space
Name getFunctionName(Index index);
Name getTableName(Index index);
+ Name getMemoryName(Index index);
Name getGlobalName(Index index);
Name getTagName(Index index);
@@ -1526,6 +1530,15 @@ public:
// names
std::vector<std::unique_ptr<ElementSegment>> elementSegments;
+ // we store memories here after being read from binary, before we know their
+ // names
+ std::vector<std::unique_ptr<Memory>> memories;
+ // we store memory imports here before adding them to the module via
+ // wasm.addMemoryImport, once we know their names
+ std::vector<Memory*> memoryImports;
+ // at index i we have all the Name references to the memory at index i
+ std::map<Index, std::vector<wasm::Name*>> memoryRefs;
+
// we store data here after being read from binary, before we know their names
std::vector<std::unique_ptr<DataSegment>> dataSegments;
@@ -1646,7 +1659,7 @@ public:
BreakTarget getBreakTarget(int32_t offset);
Name getExceptionTargetName(int32_t offset);
- void readMemoryAccess(Address& alignment, Address& offset);
+ Index readMemoryAccess(Address& alignment, Address& offset);
void visitIf(If* curr);
void visitLoop(Loop* curr);
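
Editorial note: on the binary side, memories now get the same name-to-index bookkeeping as functions, tables, and globals. A simplified sketch of building such a map; note that it ignores the imported-before-defined ordering the real index space uses and simply walks the module order:

#include <unordered_map>

#include "wasm.h"

using namespace wasm;

// Assign a dense index to each memory by name (simplified: module order only).
static std::unordered_map<Name, Index> buildMemoryIndexes(Module& wasm) {
  std::unordered_map<Name, Index> memoryIndexes;
  for (Index i = 0; i < wasm.memories.size(); i++) {
    memoryIndexes[wasm.memories[i]->name] = i;
  }
  return memoryIndexes;
}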
diff --git a/src/wasm-builder.h b/src/wasm-builder.h
index 3677b9796..69e7a7c6a 100644
--- a/src/wasm-builder.h
+++ b/src/wasm-builder.h
@@ -107,14 +107,30 @@ public:
return seg;
}
+ static std::unique_ptr<Memory> makeMemory(Name name,
+ Address initial = 0,
+ Address max = Memory::kMaxSize32,
+ bool shared = false,
+ Type indexType = Type::i32) {
+ auto memory = std::make_unique<Memory>();
+ memory->name = name;
+ memory->initial = initial;
+ memory->max = max;
+ memory->shared = shared;
+ memory->indexType = indexType;
+ return memory;
+ }
+
static std::unique_ptr<DataSegment>
makeDataSegment(Name name = "",
+ Name memory = "",
bool isPassive = false,
Expression* offset = nullptr,
const char* init = "",
Address size = 0) {
auto seg = std::make_unique<DataSegment>();
seg->name = name;
+ seg->memory = memory;
seg->isPassive = isPassive;
seg->offset = offset;
seg->data.resize(size);
@@ -354,7 +370,8 @@ public:
uint32_t offset,
unsigned align,
Expression* ptr,
- Type type) {
+ Type type,
+ Name memory) {
auto* ret = wasm.allocator.alloc<Load>();
ret->isAtomic = false;
ret->bytes = bytes;
@@ -363,11 +380,12 @@ public:
ret->align = align;
ret->ptr = ptr;
ret->type = type;
+ ret->memory = memory;
return ret;
}
- Load*
- makeAtomicLoad(unsigned bytes, uint32_t offset, Expression* ptr, Type type) {
- Load* load = makeLoad(bytes, false, offset, bytes, ptr, type);
+ Load* makeAtomicLoad(
+ unsigned bytes, uint32_t offset, Expression* ptr, Type type, Name memory) {
+ Load* load = makeLoad(bytes, false, offset, bytes, ptr, type, memory);
load->isAtomic = true;
return load;
}
@@ -375,7 +393,8 @@ public:
Expression* expected,
Expression* timeout,
Type expectedType,
- Address offset) {
+ Address offset,
+ Name memory) {
auto* wait = wasm.allocator.alloc<AtomicWait>();
wait->offset = offset;
wait->ptr = ptr;
@@ -383,15 +402,19 @@ public:
wait->timeout = timeout;
wait->expectedType = expectedType;
wait->finalize();
+ wait->memory = memory;
return wait;
}
- AtomicNotify*
- makeAtomicNotify(Expression* ptr, Expression* notifyCount, Address offset) {
+ AtomicNotify* makeAtomicNotify(Expression* ptr,
+ Expression* notifyCount,
+ Address offset,
+ Name memory) {
auto* notify = wasm.allocator.alloc<AtomicNotify>();
notify->offset = offset;
notify->ptr = ptr;
notify->notifyCount = notifyCount;
notify->finalize();
+ notify->memory = memory;
return notify;
}
AtomicFence* makeAtomicFence() { return wasm.allocator.alloc<AtomicFence>(); }
@@ -400,7 +423,8 @@ public:
unsigned align,
Expression* ptr,
Expression* value,
- Type type) {
+ Type type,
+ Name memory) {
auto* ret = wasm.allocator.alloc<Store>();
ret->isAtomic = false;
ret->bytes = bytes;
@@ -409,6 +433,7 @@ public:
ret->ptr = ptr;
ret->value = value;
ret->valueType = type;
+ ret->memory = memory;
ret->finalize();
assert(ret->value->type.isConcrete() ? ret->value->type == type : true);
return ret;
@@ -417,8 +442,9 @@ public:
uint32_t offset,
Expression* ptr,
Expression* value,
- Type type) {
- Store* store = makeStore(bytes, offset, bytes, ptr, value, type);
+ Type type,
+ Name memory) {
+ Store* store = makeStore(bytes, offset, bytes, ptr, value, type, memory);
store->isAtomic = true;
return store;
}
@@ -427,7 +453,8 @@ public:
uint32_t offset,
Expression* ptr,
Expression* value,
- Type type) {
+ Type type,
+ Name memory) {
auto* ret = wasm.allocator.alloc<AtomicRMW>();
ret->op = op;
ret->bytes = bytes;
@@ -436,6 +463,7 @@ public:
ret->value = value;
ret->type = type;
ret->finalize();
+ ret->memory = memory;
return ret;
}
AtomicCmpxchg* makeAtomicCmpxchg(unsigned bytes,
@@ -443,7 +471,8 @@ public:
Expression* ptr,
Expression* expected,
Expression* replacement,
- Type type) {
+ Type type,
+ Name memory) {
auto* ret = wasm.allocator.alloc<AtomicCmpxchg>();
ret->bytes = bytes;
ret->offset = offset;
@@ -452,6 +481,7 @@ public:
ret->replacement = replacement;
ret->type = type;
ret->finalize();
+ ret->memory = memory;
return ret;
}
SIMDExtract*
@@ -505,13 +535,17 @@ public:
ret->finalize();
return ret;
}
- SIMDLoad*
- makeSIMDLoad(SIMDLoadOp op, Address offset, Address align, Expression* ptr) {
+ SIMDLoad* makeSIMDLoad(SIMDLoadOp op,
+ Address offset,
+ Address align,
+ Expression* ptr,
+ Name memory) {
auto* ret = wasm.allocator.alloc<SIMDLoad>();
ret->op = op;
ret->offset = offset;
ret->align = align;
ret->ptr = ptr;
+ ret->memory = memory;
ret->finalize();
return ret;
}
@@ -520,7 +554,8 @@ public:
Address align,
uint8_t index,
Expression* ptr,
- Expression* vec) {
+ Expression* vec,
+ Name memory) {
auto* ret = wasm.allocator.alloc<SIMDLoadStoreLane>();
ret->op = op;
ret->offset = offset;
@@ -529,17 +564,20 @@ public:
ret->ptr = ptr;
ret->vec = vec;
ret->finalize();
+ ret->memory = memory;
return ret;
}
MemoryInit* makeMemoryInit(uint32_t segment,
Expression* dest,
Expression* offset,
- Expression* size) {
+ Expression* size,
+ Name memory) {
auto* ret = wasm.allocator.alloc<MemoryInit>();
ret->segment = segment;
ret->dest = dest;
ret->offset = offset;
ret->size = size;
+ ret->memory = memory;
ret->finalize();
return ret;
}
@@ -549,21 +587,29 @@ public:
ret->finalize();
return ret;
}
- MemoryCopy*
- makeMemoryCopy(Expression* dest, Expression* source, Expression* size) {
+ MemoryCopy* makeMemoryCopy(Expression* dest,
+ Expression* source,
+ Expression* size,
+ Name destMemory,
+ Name sourceMemory) {
auto* ret = wasm.allocator.alloc<MemoryCopy>();
ret->dest = dest;
ret->source = source;
ret->size = size;
+ ret->destMemory = destMemory;
+ ret->sourceMemory = sourceMemory;
ret->finalize();
return ret;
}
- MemoryFill*
- makeMemoryFill(Expression* dest, Expression* value, Expression* size) {
+ MemoryFill* makeMemoryFill(Expression* dest,
+ Expression* value,
+ Expression* size,
+ Name memory) {
auto* ret = wasm.allocator.alloc<MemoryFill>();
ret->dest = dest;
ret->value = value;
ret->size = size;
+ ret->memory = memory;
ret->finalize();
return ret;
}
@@ -582,8 +628,8 @@ public:
ret->finalize();
return ret;
}
- Const* makeConstPtr(uint64_t val) {
- return makeConst(Literal::makeFromInt64(val, wasm.memory.indexType));
+ Const* makeConstPtr(uint64_t val, Type indexType) {
+ return makeConst(Literal::makeFromInt64(val, indexType));
}
Binary* makeBinary(BinaryOp op, Expression* left, Expression* right) {
auto* ret = wasm.allocator.alloc<Binary>();
@@ -618,20 +664,24 @@ public:
ret->value = value;
return ret;
}
- MemorySize* makeMemorySize() {
+ MemorySize* makeMemorySize(Name memoryName) {
+ auto memory = wasm.getMemory(memoryName);
auto* ret = wasm.allocator.alloc<MemorySize>();
- if (wasm.memory.is64()) {
+ if (memory->is64()) {
ret->make64();
}
+ ret->memory = memoryName;
ret->finalize();
return ret;
}
- MemoryGrow* makeMemoryGrow(Expression* delta) {
+ MemoryGrow* makeMemoryGrow(Expression* delta, Name memoryName) {
+ auto memory = wasm.getMemory(memoryName);
auto* ret = wasm.allocator.alloc<MemoryGrow>();
- if (wasm.memory.is64()) {
+ if (memory->is64()) {
ret->make64();
}
ret->delta = delta;
+ ret->memory = memoryName;
ret->finalize();
return ret;
}
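
Editorial note: because makeMemoryCopy now takes separate destination and source memory names, a copy between two different memories can be expressed directly. A hedged sketch; the memory names "a" and "b" and the constants are assumptions for illustration, and both memories are taken to be i32-indexed:

#include "wasm.h"
#include "wasm-builder.h"

using namespace wasm;

// memory.copy of 16 bytes from offset 0 of memory "b" to offset 0 of memory "a".
static Expression* copyBetweenMemories(Module& wasm) {
  Builder builder(wasm);
  return builder.makeMemoryCopy(builder.makeConstPtr(0, Type::i32), // dest addr
                                builder.makeConstPtr(0, Type::i32), // source addr
                                builder.makeConst(int32_t(16)),     // byte count
                                "a",                                // destMemory
                                "b");                               // sourceMemory
}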
diff --git a/src/wasm-delegations-fields.def b/src/wasm-delegations-fields.def
index 0e3f60826..ff0f41d50 100644
--- a/src/wasm-delegations-fields.def
+++ b/src/wasm-delegations-fields.def
@@ -266,6 +266,7 @@ switch (DELEGATE_ID) {
DELEGATE_FIELD_ADDRESS(Load, offset);
DELEGATE_FIELD_ADDRESS(Load, align);
DELEGATE_FIELD_INT(Load, isAtomic);
+ DELEGATE_FIELD_NAME(Load, memory);
DELEGATE_END(Load);
break;
}
@@ -278,6 +279,7 @@ switch (DELEGATE_ID) {
DELEGATE_FIELD_ADDRESS(Store, align);
DELEGATE_FIELD_INT(Store, isAtomic);
DELEGATE_FIELD_TYPE(Store, valueType);
+ DELEGATE_FIELD_NAME(Store, memory);
DELEGATE_END(Store);
break;
}
@@ -288,6 +290,7 @@ switch (DELEGATE_ID) {
DELEGATE_FIELD_INT(AtomicRMW, op);
DELEGATE_FIELD_INT(AtomicRMW, bytes);
DELEGATE_FIELD_ADDRESS(AtomicRMW, offset);
+ DELEGATE_FIELD_NAME(AtomicRMW, memory);
DELEGATE_END(AtomicRMW);
break;
}
@@ -298,6 +301,7 @@ switch (DELEGATE_ID) {
DELEGATE_FIELD_CHILD(AtomicCmpxchg, ptr);
DELEGATE_FIELD_INT(AtomicCmpxchg, bytes);
DELEGATE_FIELD_ADDRESS(AtomicCmpxchg, offset);
+ DELEGATE_FIELD_NAME(AtomicCmpxchg, memory);
DELEGATE_END(AtomicCmpxchg);
break;
}
@@ -308,6 +312,7 @@ switch (DELEGATE_ID) {
DELEGATE_FIELD_CHILD(AtomicWait, ptr);
DELEGATE_FIELD_ADDRESS(AtomicWait, offset);
DELEGATE_FIELD_TYPE(AtomicWait, expectedType);
+ DELEGATE_FIELD_NAME(AtomicWait, memory);
DELEGATE_END(AtomicWait);
break;
}
@@ -316,6 +321,7 @@ switch (DELEGATE_ID) {
DELEGATE_FIELD_CHILD(AtomicNotify, notifyCount);
DELEGATE_FIELD_CHILD(AtomicNotify, ptr);
DELEGATE_FIELD_ADDRESS(AtomicNotify, offset);
+ DELEGATE_FIELD_NAME(AtomicNotify, memory);
DELEGATE_END(AtomicNotify);
break;
}
@@ -373,6 +379,7 @@ switch (DELEGATE_ID) {
DELEGATE_FIELD_INT(SIMDLoad, op);
DELEGATE_FIELD_ADDRESS(SIMDLoad, offset);
DELEGATE_FIELD_ADDRESS(SIMDLoad, align);
+ DELEGATE_FIELD_NAME(SIMDLoad, memory);
DELEGATE_END(SIMDLoad);
break;
}
@@ -384,6 +391,7 @@ switch (DELEGATE_ID) {
DELEGATE_FIELD_ADDRESS(SIMDLoadStoreLane, offset);
DELEGATE_FIELD_ADDRESS(SIMDLoadStoreLane, align);
DELEGATE_FIELD_INT(SIMDLoadStoreLane, index);
+ DELEGATE_FIELD_NAME(SIMDLoadStoreLane, memory);
DELEGATE_END(SIMDLoadStoreLane);
break;
}
@@ -393,6 +401,7 @@ switch (DELEGATE_ID) {
DELEGATE_FIELD_CHILD(MemoryInit, offset);
DELEGATE_FIELD_CHILD(MemoryInit, dest);
DELEGATE_FIELD_INT(MemoryInit, segment);
+ DELEGATE_FIELD_NAME(MemoryInit, memory);
DELEGATE_END(MemoryInit);
break;
}
@@ -407,6 +416,8 @@ switch (DELEGATE_ID) {
DELEGATE_FIELD_CHILD(MemoryCopy, size);
DELEGATE_FIELD_CHILD(MemoryCopy, source);
DELEGATE_FIELD_CHILD(MemoryCopy, dest);
+ DELEGATE_FIELD_NAME(MemoryCopy, sourceMemory);
+ DELEGATE_FIELD_NAME(MemoryCopy, destMemory);
DELEGATE_END(MemoryCopy);
break;
}
@@ -415,6 +426,7 @@ switch (DELEGATE_ID) {
DELEGATE_FIELD_CHILD(MemoryFill, size);
DELEGATE_FIELD_CHILD(MemoryFill, value);
DELEGATE_FIELD_CHILD(MemoryFill, dest);
+ DELEGATE_FIELD_NAME(MemoryFill, memory);
DELEGATE_END(MemoryFill);
break;
}
@@ -462,6 +474,7 @@ switch (DELEGATE_ID) {
case Expression::Id::MemorySizeId: {
DELEGATE_START(MemorySize);
DELEGATE_FIELD_TYPE(MemorySize, ptrType);
+ DELEGATE_FIELD_NAME(MemorySize, memory);
DELEGATE_END(MemorySize);
break;
}
@@ -469,6 +482,7 @@ switch (DELEGATE_ID) {
DELEGATE_START(MemoryGrow);
DELEGATE_FIELD_TYPE(MemoryGrow, ptrType);
DELEGATE_FIELD_CHILD(MemoryGrow, delta);
+ DELEGATE_FIELD_NAME(MemoryGrow, memory);
DELEGATE_END(MemoryGrow);
break;
}
diff --git a/src/wasm-interpreter.h b/src/wasm-interpreter.h
index 254a1a2e2..fec560e35 100644
--- a/src/wasm-interpreter.h
+++ b/src/wasm-interpreter.h
@@ -2273,7 +2273,7 @@ public:
Literals& arguments,
Type result,
SubType& instance) = 0;
- virtual bool growMemory(Address oldSize, Address newSize) = 0;
+ virtual bool growMemory(Name name, Address oldSize, Address newSize) = 0;
virtual bool growTable(Name name,
const Literal& value,
Index oldSize,
@@ -2284,18 +2284,18 @@ public:
// the default impls for load and store switch on the sizes. you can either
// customize load/store, or the sub-functions which they call
- virtual Literal load(Load* load, Address addr) {
+ virtual Literal load(Load* load, Address addr, Name memory) {
switch (load->type.getBasic()) {
case Type::i32: {
switch (load->bytes) {
case 1:
- return load->signed_ ? Literal((int32_t)load8s(addr))
- : Literal((int32_t)load8u(addr));
+ return load->signed_ ? Literal((int32_t)load8s(addr, memory))
+ : Literal((int32_t)load8u(addr, memory));
case 2:
- return load->signed_ ? Literal((int32_t)load16s(addr))
- : Literal((int32_t)load16u(addr));
+ return load->signed_ ? Literal((int32_t)load16s(addr, memory))
+ : Literal((int32_t)load16u(addr, memory));
case 4:
- return Literal((int32_t)load32s(addr));
+ return Literal((int32_t)load32s(addr, memory));
default:
WASM_UNREACHABLE("invalid size");
}
@@ -2304,45 +2304,45 @@ public:
case Type::i64: {
switch (load->bytes) {
case 1:
- return load->signed_ ? Literal((int64_t)load8s(addr))
- : Literal((int64_t)load8u(addr));
+ return load->signed_ ? Literal((int64_t)load8s(addr, memory))
+ : Literal((int64_t)load8u(addr, memory));
case 2:
- return load->signed_ ? Literal((int64_t)load16s(addr))
- : Literal((int64_t)load16u(addr));
+ return load->signed_ ? Literal((int64_t)load16s(addr, memory))
+ : Literal((int64_t)load16u(addr, memory));
case 4:
- return load->signed_ ? Literal((int64_t)load32s(addr))
- : Literal((int64_t)load32u(addr));
+ return load->signed_ ? Literal((int64_t)load32s(addr, memory))
+ : Literal((int64_t)load32u(addr, memory));
case 8:
- return Literal((int64_t)load64s(addr));
+ return Literal((int64_t)load64s(addr, memory));
default:
WASM_UNREACHABLE("invalid size");
}
break;
}
case Type::f32:
- return Literal(load32u(addr)).castToF32();
+ return Literal(load32u(addr, memory)).castToF32();
case Type::f64:
- return Literal(load64u(addr)).castToF64();
+ return Literal(load64u(addr, memory)).castToF64();
case Type::v128:
- return Literal(load128(addr).data());
+ return Literal(load128(addr, load->memory).data());
case Type::none:
case Type::unreachable:
WASM_UNREACHABLE("unexpected type");
}
WASM_UNREACHABLE("invalid type");
}
- virtual void store(Store* store, Address addr, Literal value) {
+ virtual void store(Store* store, Address addr, Literal value, Name memory) {
switch (store->valueType.getBasic()) {
case Type::i32: {
switch (store->bytes) {
case 1:
- store8(addr, value.geti32());
+ store8(addr, value.geti32(), memory);
break;
case 2:
- store16(addr, value.geti32());
+ store16(addr, value.geti32(), memory);
break;
case 4:
- store32(addr, value.geti32());
+ store32(addr, value.geti32(), memory);
break;
default:
WASM_UNREACHABLE("invalid store size");
@@ -2352,16 +2352,16 @@ public:
case Type::i64: {
switch (store->bytes) {
case 1:
- store8(addr, value.geti64());
+ store8(addr, value.geti64(), memory);
break;
case 2:
- store16(addr, value.geti64());
+ store16(addr, value.geti64(), memory);
break;
case 4:
- store32(addr, value.geti64());
+ store32(addr, value.geti64(), memory);
break;
case 8:
- store64(addr, value.geti64());
+ store64(addr, value.geti64(), memory);
break;
default:
WASM_UNREACHABLE("invalid store size");
@@ -2370,13 +2370,13 @@ public:
}
// write floats carefully, ensuring all bits reach memory
case Type::f32:
- store32(addr, value.reinterpreti32());
+ store32(addr, value.reinterpreti32(), memory);
break;
case Type::f64:
- store64(addr, value.reinterpreti64());
+ store64(addr, value.reinterpreti64(), memory);
break;
case Type::v128:
- store128(addr, value.getv128());
+ store128(addr, value.getv128(), memory);
break;
case Type::none:
case Type::unreachable:
@@ -2384,31 +2384,48 @@ public:
}
}
- virtual int8_t load8s(Address addr) { WASM_UNREACHABLE("unimp"); }
- virtual uint8_t load8u(Address addr) { WASM_UNREACHABLE("unimp"); }
- virtual int16_t load16s(Address addr) { WASM_UNREACHABLE("unimp"); }
- virtual uint16_t load16u(Address addr) { WASM_UNREACHABLE("unimp"); }
- virtual int32_t load32s(Address addr) { WASM_UNREACHABLE("unimp"); }
- virtual uint32_t load32u(Address addr) { WASM_UNREACHABLE("unimp"); }
- virtual int64_t load64s(Address addr) { WASM_UNREACHABLE("unimp"); }
- virtual uint64_t load64u(Address addr) { WASM_UNREACHABLE("unimp"); }
- virtual std::array<uint8_t, 16> load128(Address addr) {
+ virtual int8_t load8s(Address addr, Name memoryName) {
+ WASM_UNREACHABLE("unimp");
+ }
+ virtual uint8_t load8u(Address addr, Name memoryName) {
+ WASM_UNREACHABLE("unimp");
+ }
+ virtual int16_t load16s(Address addr, Name memoryName) {
+ WASM_UNREACHABLE("unimp");
+ }
+ virtual uint16_t load16u(Address addr, Name memoryName) {
+ WASM_UNREACHABLE("unimp");
+ }
+ virtual int32_t load32s(Address addr, Name memoryName) {
+ WASM_UNREACHABLE("unimp");
+ }
+ virtual uint32_t load32u(Address addr, Name memoryName) {
+ WASM_UNREACHABLE("unimp");
+ }
+ virtual int64_t load64s(Address addr, Name memoryName) {
+ WASM_UNREACHABLE("unimp");
+ }
+ virtual uint64_t load64u(Address addr, Name memoryName) {
+ WASM_UNREACHABLE("unimp");
+ }
+ virtual std::array<uint8_t, 16> load128(Address addr, Name memoryName) {
WASM_UNREACHABLE("unimp");
}
- virtual void store8(Address addr, int8_t value) {
+ virtual void store8(Address addr, int8_t value, Name memoryName) {
WASM_UNREACHABLE("unimp");
}
- virtual void store16(Address addr, int16_t value) {
+ virtual void store16(Address addr, int16_t value, Name memoryName) {
WASM_UNREACHABLE("unimp");
}
- virtual void store32(Address addr, int32_t value) {
+ virtual void store32(Address addr, int32_t value, Name memoryName) {
WASM_UNREACHABLE("unimp");
}
- virtual void store64(Address addr, int64_t value) {
+ virtual void store64(Address addr, int64_t value, Name memoryName) {
WASM_UNREACHABLE("unimp");
}
- virtual void store128(Address addr, const std::array<uint8_t, 16>&) {
+ virtual void
+ store128(Address addr, const std::array<uint8_t, 16>&, Name memoryName) {
WASM_UNREACHABLE("unimp");
}
@@ -2441,8 +2458,6 @@ public:
externalInterface(externalInterface), linkedInstances(linkedInstances_) {
// import globals from the outside
externalInterface->importGlobals(globals, wasm);
- // prepare memory
- memorySize = wasm.memory.initial;
// generate internal (non-imported) globals
ModuleUtils::iterDefinedGlobals(wasm, [&](Global* global) {
globals[global->name] = self()->visit(global->init).values;
@@ -2556,7 +2571,27 @@ private:
});
}
+ struct MemoryInstanceInfo {
+ // The ModuleRunner instance in which the memory is defined.
+ SubType* instance;
+ // The name the memory has in that instance.
+ Name name;
+ };
+
+ MemoryInstanceInfo getMemoryInstanceInfo(Name name) {
+ auto* memory = wasm.getMemory(name);
+ if (!memory->imported()) {
+ return MemoryInstanceInfo{self(), name};
+ }
+
+ auto& importedInstance = linkedInstances.at(memory->module);
+ auto* memoryExport = importedInstance->wasm.getExport(memory->base);
+ return importedInstance->getMemoryInstanceInfo(memoryExport->value);
+ }
+
void initializeMemoryContents() {
+ initializeMemorySizes();
Const offset;
offset.value = Literal(uint32_t(0));
offset.finalize();
@@ -2572,6 +2607,7 @@ private:
size.finalize();
MemoryInit init;
+ init.memory = segment->memory;
init.segment = i;
init.dest = segment->offset;
init.offset = &offset;
@@ -2587,6 +2623,31 @@ private:
}
}
+ // the current size of each memory, in pages, kept up to date by the memory
+ // operations below
+ std::unordered_map<Name, Address> memorySizes;
+
+ void initializeMemorySizes() {
+ for (auto& memory : wasm.memories) {
+ memorySizes[memory->name] = memory->initial;
+ }
+ }
+
+ Address getMemorySize(Name memory) {
+ auto iter = memorySizes.find(memory);
+ if (iter == memorySizes.end()) {
+ externalInterface->trap("getMemorySize called on non-existing memory");
+ }
+ return iter->second;
+ }
+
+ void setMemorySize(Name memory, Address size) {
+ auto iter = memorySizes.find(memory);
+ if (iter == memorySizes.end()) {
+ externalInterface->trap("setMemorySize called on non-existing memory");
+ }
+ memorySizes[memory] = size;
+ }
+
public:
class FunctionScope {
public:
@@ -2645,15 +2706,6 @@ private:
SmallVector<std::pair<WasmException, Name>, 4> exceptionStack;
protected:
- // Returns the instance that defines the memory used by this one.
- SubType* getMemoryInstance() {
- auto* inst = self();
- while (inst->wasm.memory.imported()) {
- inst = inst->linkedInstances.at(inst->wasm.memory.module).get();
- }
- return inst;
- }
-
// Returns a reference to the current value of a potentially imported global
Literals& getGlobal(Name name) {
auto* inst = self();
@@ -2874,12 +2926,14 @@ public:
return flow;
}
NOTE_EVAL1(flow);
- auto* inst = getMemoryInstance();
- auto addr = inst->getFinalAddress(curr, flow.getSingleValue());
+ auto info = getMemoryInstanceInfo(curr->memory);
+ auto memorySize = info.instance->getMemorySize(info.name);
+ auto addr =
+ info.instance->getFinalAddress(curr, flow.getSingleValue(), memorySize);
if (curr->isAtomic) {
- inst->checkAtomicAddress(addr, curr->bytes);
+ info.instance->checkAtomicAddress(addr, curr->bytes, memorySize);
}
- auto ret = inst->externalInterface->load(curr, addr);
+ auto ret = info.instance->externalInterface->load(curr, addr, info.name);
NOTE_EVAL1(addr);
NOTE_EVAL1(ret);
return ret;
@@ -2894,14 +2948,17 @@ public:
if (value.breaking()) {
return value;
}
- auto* inst = getMemoryInstance();
- auto addr = inst->getFinalAddress(curr, ptr.getSingleValue());
+ auto info = getMemoryInstanceInfo(curr->memory);
+ auto memorySize = info.instance->getMemorySize(info.name);
+ auto addr =
+ info.instance->getFinalAddress(curr, ptr.getSingleValue(), memorySize);
if (curr->isAtomic) {
- inst->checkAtomicAddress(addr, curr->bytes);
+ info.instance->checkAtomicAddress(addr, curr->bytes, memorySize);
}
NOTE_EVAL1(addr);
NOTE_EVAL1(value);
- inst->externalInterface->store(curr, addr, value.getSingleValue());
+ info.instance->externalInterface->store(
+ curr, addr, value.getSingleValue(), info.name);
return Flow();
}
@@ -2916,11 +2973,14 @@ public:
return value;
}
NOTE_EVAL1(ptr);
- auto* inst = getMemoryInstance();
- auto addr = inst->getFinalAddress(curr, ptr.getSingleValue());
+ auto info = getMemoryInstanceInfo(curr->memory);
+ auto memorySize = info.instance->getMemorySize(info.name);
+ auto addr =
+ info.instance->getFinalAddress(curr, ptr.getSingleValue(), memorySize);
NOTE_EVAL1(addr);
NOTE_EVAL1(value);
- auto loaded = inst->doAtomicLoad(addr, curr->bytes, curr->type);
+ auto loaded = info.instance->doAtomicLoad(
+ addr, curr->bytes, curr->type, info.name, memorySize);
NOTE_EVAL1(loaded);
auto computed = value.getSingleValue();
switch (curr->op) {
@@ -2942,7 +3002,8 @@ public:
case RMWXchg:
break;
}
- inst->doAtomicStore(addr, curr->bytes, computed);
+ info.instance->doAtomicStore(
+ addr, curr->bytes, computed, info.name, memorySize);
return loaded;
}
Flow visitAtomicCmpxchg(AtomicCmpxchg* curr) {
@@ -2960,16 +3021,20 @@ public:
if (replacement.breaking()) {
return replacement;
}
- auto* inst = getMemoryInstance();
- auto addr = inst->getFinalAddress(curr, ptr.getSingleValue());
+ auto info = getMemoryInstanceInfo(curr->memory);
+ auto memorySize = info.instance->getMemorySize(info.name);
+ auto addr =
+ info.instance->getFinalAddress(curr, ptr.getSingleValue(), memorySize);
expected = Flow(wrapToSmallerSize(expected.getSingleValue(), curr->bytes));
NOTE_EVAL1(addr);
NOTE_EVAL1(expected);
NOTE_EVAL1(replacement);
- auto loaded = inst->doAtomicLoad(addr, curr->bytes, curr->type);
+ auto loaded = info.instance->doAtomicLoad(
+ addr, curr->bytes, curr->type, info.name, memorySize);
NOTE_EVAL1(loaded);
if (loaded == expected.getSingleValue()) {
- inst->doAtomicStore(addr, curr->bytes, replacement.getSingleValue());
+ info.instance->doAtomicStore(
+ addr, curr->bytes, replacement.getSingleValue(), info.name, memorySize);
}
return loaded;
}
@@ -2990,10 +3055,13 @@ public:
if (timeout.breaking()) {
return timeout;
}
- auto* inst = getMemoryInstance();
auto bytes = curr->expectedType.getByteSize();
- auto addr = inst->getFinalAddress(curr, ptr.getSingleValue(), bytes);
- auto loaded = inst->doAtomicLoad(addr, bytes, curr->expectedType);
+ auto info = getMemoryInstanceInfo(curr->memory);
+ auto memorySize = info.instance->getMemorySize(info.name);
+ auto addr = info.instance->getFinalAddress(
+ curr, ptr.getSingleValue(), bytes, memorySize);
+ auto loaded = info.instance->doAtomicLoad(
+ addr, bytes, curr->expectedType, info.name, memorySize);
NOTE_EVAL1(loaded);
if (loaded != expected.getSingleValue()) {
return Literal(int32_t(1)); // not equal
@@ -3014,10 +3082,12 @@ public:
if (count.breaking()) {
return count;
}
- auto* inst = getMemoryInstance();
- auto addr = inst->getFinalAddress(curr, ptr.getSingleValue(), 4);
+ auto info = getMemoryInstanceInfo(curr->memory);
+ auto memorySize = info.instance->getMemorySize(info.name);
+ auto addr =
+ info.instance->getFinalAddress(curr, ptr.getSingleValue(), 4, memorySize);
// Just check TODO actual threads support
- inst->checkAtomicAddress(addr, 4);
+ info.instance->checkAtomicAddress(addr, 4, memorySize);
return Literal(int32_t(0)); // none woken up
}
Flow visitSIMDLoad(SIMDLoad* curr) {
@@ -3043,6 +3113,7 @@ public:
}
Flow visitSIMDLoadSplat(SIMDLoad* curr) {
Load load;
+ load.memory = curr->memory;
load.type = Type::i32;
load.bytes = curr->getMemBytes();
load.signed_ = false;
@@ -3082,30 +3153,37 @@ public:
}
NOTE_EVAL1(flow);
Address src(uint32_t(flow.getSingleValue().geti32()));
- auto* inst = getMemoryInstance();
+ auto info = getMemoryInstanceInfo(curr->memory);
auto loadLane = [&](Address addr) {
switch (curr->op) {
case Load8x8SVec128:
- return Literal(int32_t(inst->externalInterface->load8s(addr)));
+ return Literal(
+ int32_t(info.instance->externalInterface->load8s(addr, info.name)));
case Load8x8UVec128:
- return Literal(int32_t(inst->externalInterface->load8u(addr)));
+ return Literal(
+ int32_t(info.instance->externalInterface->load8u(addr, info.name)));
case Load16x4SVec128:
- return Literal(int32_t(inst->externalInterface->load16s(addr)));
+ return Literal(int32_t(
+ info.instance->externalInterface->load16s(addr, info.name)));
case Load16x4UVec128:
- return Literal(int32_t(inst->externalInterface->load16u(addr)));
+ return Literal(int32_t(
+ info.instance->externalInterface->load16u(addr, info.name)));
case Load32x2SVec128:
- return Literal(int64_t(inst->externalInterface->load32s(addr)));
+ return Literal(int64_t(
+ info.instance->externalInterface->load32s(addr, info.name)));
case Load32x2UVec128:
- return Literal(int64_t(inst->externalInterface->load32u(addr)));
+ return Literal(int64_t(
+ info.instance->externalInterface->load32u(addr, info.name)));
default:
WASM_UNREACHABLE("unexpected op");
}
WASM_UNREACHABLE("invalid op");
};
+ auto memorySize = info.instance->getMemorySize(info.name);
auto fillLanes = [&](auto lanes, size_t laneBytes) {
for (auto& lane : lanes) {
- lane = loadLane(
- inst->getFinalAddress(curr, Literal(uint32_t(src)), laneBytes));
+ lane = loadLane(info.instance->getFinalAddress(
+ curr, Literal(uint32_t(src)), laneBytes, memorySize));
src = Address(uint32_t(src) + laneBytes);
}
return Literal(lanes);
@@ -3137,16 +3215,19 @@ public:
return flow;
}
NOTE_EVAL1(flow);
- auto* inst = getMemoryInstance();
- Address src =
- inst->getFinalAddress(curr, flow.getSingleValue(), curr->getMemBytes());
+ auto info = getMemoryInstanceInfo(curr->memory);
+ auto memorySize = info.instance->getMemorySize(info.name);
+ Address src = info.instance->getFinalAddress(
+ curr, flow.getSingleValue(), curr->getMemBytes(), memorySize);
auto zero =
Literal::makeZero(curr->op == Load32ZeroVec128 ? Type::i32 : Type::i64);
if (curr->op == Load32ZeroVec128) {
- auto val = Literal(inst->externalInterface->load32u(src));
+ auto val =
+ Literal(info.instance->externalInterface->load32u(src, info.name));
return Literal(std::array<Literal, 4>{{val, zero, zero, zero}});
} else {
- auto val = Literal(inst->externalInterface->load64u(src));
+ auto val =
+ Literal(info.instance->externalInterface->load64u(src, info.name));
return Literal(std::array<Literal, 2>{{val, zero}});
}
}
@@ -3157,9 +3238,10 @@ public:
return flow;
}
NOTE_EVAL1(flow);
- auto* inst = getMemoryInstance();
- Address addr =
- inst->getFinalAddress(curr, flow.getSingleValue(), curr->getMemBytes());
+ auto info = getMemoryInstanceInfo(curr->memory);
+ auto memorySize = info.instance->getMemorySize(info.name);
+ Address addr = info.instance->getFinalAddress(
+ curr, flow.getSingleValue(), curr->getMemBytes(), memorySize);
flow = self()->visit(curr->vec);
if (flow.breaking()) {
return flow;
@@ -3170,10 +3252,12 @@ public:
case Store8LaneVec128: {
std::array<Literal, 16> lanes = vec.getLanesUI8x16();
if (curr->isLoad()) {
- lanes[curr->index] = Literal(inst->externalInterface->load8u(addr));
+ lanes[curr->index] =
+ Literal(info.instance->externalInterface->load8u(addr, info.name));
return Literal(lanes);
} else {
- inst->externalInterface->store8(addr, lanes[curr->index].geti32());
+ info.instance->externalInterface->store8(
+ addr, lanes[curr->index].geti32(), info.name);
return {};
}
}
@@ -3181,10 +3265,12 @@ public:
case Store16LaneVec128: {
std::array<Literal, 8> lanes = vec.getLanesUI16x8();
if (curr->isLoad()) {
- lanes[curr->index] = Literal(inst->externalInterface->load16u(addr));
+ lanes[curr->index] =
+ Literal(info.instance->externalInterface->load16u(addr, info.name));
return Literal(lanes);
} else {
- inst->externalInterface->store16(addr, lanes[curr->index].geti32());
+ info.instance->externalInterface->store16(
+ addr, lanes[curr->index].geti32(), info.name);
return {};
}
}
@@ -3192,10 +3278,12 @@ public:
case Store32LaneVec128: {
std::array<Literal, 4> lanes = vec.getLanesI32x4();
if (curr->isLoad()) {
- lanes[curr->index] = Literal(inst->externalInterface->load32u(addr));
+ lanes[curr->index] =
+ Literal(info.instance->externalInterface->load32u(addr, info.name));
return Literal(lanes);
} else {
- inst->externalInterface->store32(addr, lanes[curr->index].geti32());
+ info.instance->externalInterface->store32(
+ addr, lanes[curr->index].geti32(), info.name);
return {};
}
}
@@ -3203,10 +3291,12 @@ public:
case Load64LaneVec128: {
std::array<Literal, 2> lanes = vec.getLanesI64x2();
if (curr->isLoad()) {
- lanes[curr->index] = Literal(inst->externalInterface->load64u(addr));
+ lanes[curr->index] =
+ Literal(info.instance->externalInterface->load64u(addr, info.name));
return Literal(lanes);
} else {
- inst->externalInterface->store64(addr, lanes[curr->index].geti64());
+ info.instance->externalInterface->store64(
+ addr, lanes[curr->index].geti64(), info.name);
return {};
}
}
@@ -3215,38 +3305,44 @@ public:
}
Flow visitMemorySize(MemorySize* curr) {
NOTE_ENTER("MemorySize");
- auto* inst = getMemoryInstance();
- return Literal::makeFromInt64(inst->memorySize,
- inst->wasm.memory.indexType);
+ auto info = getMemoryInstanceInfo(curr->memory);
+ auto memorySize = info.instance->getMemorySize(info.name);
+ auto* memory = info.instance->wasm.getMemory(info.name);
+ return Literal::makeFromInt64(memorySize, memory->indexType);
}
Flow visitMemoryGrow(MemoryGrow* curr) {
NOTE_ENTER("MemoryGrow");
- auto* inst = getMemoryInstance();
- auto indexType = inst->wasm.memory.indexType;
- auto fail = Literal::makeFromInt64(-1, indexType);
+ auto info = getMemoryInstanceInfo(curr->memory);
+ auto memorySize = info.instance->getMemorySize(info.name);
+ auto* memory = info.instance->wasm.getMemory(info.name);
+ auto indexType = memory->indexType;
+ auto fail = Literal::makeFromInt64(-1, memory->indexType);
Flow flow = self()->visit(curr->delta);
if (flow.breaking()) {
return flow;
}
- Flow ret = Literal::makeFromInt64(inst->memorySize, indexType);
+ Flow ret = Literal::makeFromInt64(memorySize, indexType);
uint64_t delta = flow.getSingleValue().getUnsigned();
if (delta > uint32_t(-1) / Memory::kPageSize && indexType == Type::i32) {
return fail;
}
- if (inst->memorySize >= uint32_t(-1) - delta && indexType == Type::i32) {
+ if (memorySize >= uint32_t(-1) - delta && indexType == Type::i32) {
return fail;
}
- auto newSize = inst->memorySize + delta;
- if (newSize > inst->wasm.memory.max) {
+ auto newSize = memorySize + delta;
+ if (newSize > memory->max) {
return fail;
}
- if (!inst->externalInterface->growMemory(
- inst->memorySize * Memory::kPageSize, newSize * Memory::kPageSize)) {
+ if (!info.instance->externalInterface->growMemory(
+ info.name,
+ memorySize * Memory::kPageSize,
+ newSize * Memory::kPageSize)) {
// We failed to grow the memory in practice, even though it was valid
// to try to do so.
return fail;
}
- inst->memorySize = newSize;
+ memorySize = newSize;
+ info.instance->setMemorySize(info.name, memorySize);
return ret;
}
Flow visitMemoryInit(MemoryInit* curr) {
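As a rough sketch of the memory.grow checks above (sizes in 64 KiB pages, the external growMemory hook omitted; tryGrow and its parameters are illustrative, not the interpreter's interface):

#include <cstdint>
#include <optional>

// Returns the old size in pages on success, or std::nullopt on failure,
// which the instruction reports as -1.
std::optional<uint64_t> tryGrow(uint64_t& currentPages,
                                uint64_t deltaPages,
                                uint64_t maxPages,
                                bool is64) {
  const uint64_t kPageSize = 65536;
  if (!is64) {
    // A 32-bit memory must stay below 2^32 bytes.
    if (deltaPages > uint32_t(-1) / kPageSize) {
      return std::nullopt;
    }
    if (currentPages >= uint32_t(-1) - deltaPages) {
      return std::nullopt;
    }
  }
  uint64_t newPages = currentPages + deltaPages;
  if (newPages > maxPages) {
    return std::nullopt;
  }
  uint64_t oldPages = currentPages;
  currentPages = newPages;
  return oldPages;
}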
@@ -3280,15 +3376,17 @@ public:
if ((uint64_t)offsetVal + sizeVal > segment->data.size()) {
trap("out of bounds segment access in memory.init");
}
- auto* inst = getMemoryInstance();
- if (destVal + sizeVal > inst->memorySize * Memory::kPageSize) {
+ auto info = getMemoryInstanceInfo(curr->memory);
+ auto memorySize = info.instance->getMemorySize(info.name);
+ if (destVal + sizeVal > memorySize * Memory::kPageSize) {
trap("out of bounds memory access in memory.init");
}
for (size_t i = 0; i < sizeVal; ++i) {
Literal addr(destVal + i);
- inst->externalInterface->store8(
- inst->getFinalAddressWithoutOffset(addr, 1),
- segment->data[offsetVal + i]);
+ info.instance->externalInterface->store8(
+ info.instance->getFinalAddressWithoutOffset(addr, 1, memorySize),
+ segment->data[offsetVal + i],
+ info.name);
}
return {};
}
@@ -3318,9 +3416,12 @@ public:
Address sourceVal(source.getSingleValue().getUnsigned());
Address sizeVal(size.getSingleValue().getUnsigned());
- auto* inst = getMemoryInstance();
- if (sourceVal + sizeVal > inst->memorySize * Memory::kPageSize ||
- destVal + sizeVal > inst->memorySize * Memory::kPageSize ||
+ auto destInfo = getMemoryInstanceInfo(curr->destMemory);
+ auto sourceInfo = getMemoryInstanceInfo(curr->sourceMemory);
+ auto destMemorySize = destInfo.instance->getMemorySize(destInfo.name);
+ auto sourceMemorySize = sourceInfo.instance->getMemorySize(sourceInfo.name);
+ if (sourceVal + sizeVal > sourceMemorySize * Memory::kPageSize ||
+ destVal + sizeVal > destMemorySize * Memory::kPageSize ||
// FIXME: better/cheaper way to detect wrapping?
sourceVal + sizeVal < sourceVal || sourceVal + sizeVal < sizeVal ||
destVal + sizeVal < destVal || destVal + sizeVal < sizeVal) {
@@ -3337,10 +3438,14 @@ public:
step = -1;
}
for (int64_t i = start; i != end; i += step) {
- inst->externalInterface->store8(
- inst->getFinalAddressWithoutOffset(Literal(destVal + i), 1),
- inst->externalInterface->load8s(
- inst->getFinalAddressWithoutOffset(Literal(sourceVal + i), 1)));
+ destInfo.instance->externalInterface->store8(
+ destInfo.instance->getFinalAddressWithoutOffset(
+ Literal(destVal + i), 1, destMemorySize),
+ sourceInfo.instance->externalInterface->load8s(
+ sourceInfo.instance->getFinalAddressWithoutOffset(
+ Literal(sourceVal + i), 1, sourceMemorySize),
+ sourceInfo.name),
+ destInfo.name);
}
return {};
}
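A simplified sketch of the byte-wise copy loop above, with std::vector<uint8_t> standing in for linear memory and the earlier bounds checks assumed to have passed (illustrative, not the interpreter's API):

#include <cstddef>
#include <cstdint>
#include <vector>

// Copies size bytes between two (possibly distinct) memories. When source and
// destination alias the same memory and the destination starts later, copy
// backwards so overlapping ranges behave like memmove.
void copyBytes(std::vector<uint8_t>& dest,
               size_t destOffset,
               const std::vector<uint8_t>& source,
               size_t sourceOffset,
               size_t size) {
  bool sameMemory = &dest == &source;
  if (sameMemory && destOffset > sourceOffset) {
    for (size_t i = size; i > 0; i--) {
      dest[destOffset + i - 1] = source[sourceOffset + i - 1];
    }
  } else {
    for (size_t i = 0; i < size; i++) {
      dest[destOffset + i] = source[sourceOffset + i];
    }
  }
}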
@@ -3364,17 +3469,21 @@ public:
Address destVal(dest.getSingleValue().getUnsigned());
Address sizeVal(size.getSingleValue().getUnsigned());
- auto* inst = getMemoryInstance();
+ auto info = getMemoryInstanceInfo(curr->memory);
+ auto memorySize = info.instance->getMemorySize(info.name);
// FIXME: cheaper wrapping detection?
- if (destVal > inst->memorySize * Memory::kPageSize ||
- sizeVal > inst->memorySize * Memory::kPageSize ||
- destVal + sizeVal > inst->memorySize * Memory::kPageSize) {
+ if (destVal > memorySize * Memory::kPageSize ||
+ sizeVal > memorySize * Memory::kPageSize ||
+ destVal + sizeVal > memorySize * Memory::kPageSize) {
trap("out of bounds memory access in memory.fill");
}
uint8_t val(value.getSingleValue().geti32());
for (size_t i = 0; i < sizeVal; ++i) {
- inst->externalInterface->store8(
- inst->getFinalAddressWithoutOffset(Literal(destVal + i), 1), val);
+ info.instance->externalInterface->store8(
+ info.instance->getFinalAddressWithoutOffset(
+ Literal(destVal + i), 1, memorySize),
+ val,
+ info.name);
}
return {};
}
@@ -3550,8 +3659,6 @@ public:
static const Index maxDepth = 250;
protected:
- Address memorySize; // in pages
-
void trapIfGt(uint64_t lhs, uint64_t rhs, const char* msg) {
if (lhs > rhs) {
std::stringstream ss;
@@ -3561,34 +3668,37 @@ protected:
}
template<class LS>
- Address getFinalAddress(LS* curr, Literal ptr, Index bytes) {
+ Address
+ getFinalAddress(LS* curr, Literal ptr, Index bytes, Address memorySize) {
Address memorySizeBytes = memorySize * Memory::kPageSize;
uint64_t addr = ptr.type == Type::i32 ? ptr.geti32() : ptr.geti64();
trapIfGt(curr->offset, memorySizeBytes, "offset > memory");
trapIfGt(addr, memorySizeBytes - curr->offset, "final > memory");
addr += curr->offset;
trapIfGt(bytes, memorySizeBytes, "bytes > memory");
- checkLoadAddress(addr, bytes);
+ checkLoadAddress(addr, bytes, memorySize);
return addr;
}
- template<class LS> Address getFinalAddress(LS* curr, Literal ptr) {
- return getFinalAddress(curr, ptr, curr->bytes);
+ template<class LS>
+ Address getFinalAddress(LS* curr, Literal ptr, Address memorySize) {
+ return getFinalAddress(curr, ptr, curr->bytes, memorySize);
}
- Address getFinalAddressWithoutOffset(Literal ptr, Index bytes) {
+ Address
+ getFinalAddressWithoutOffset(Literal ptr, Index bytes, Address memorySize) {
uint64_t addr = ptr.type == Type::i32 ? ptr.geti32() : ptr.geti64();
- checkLoadAddress(addr, bytes);
+ checkLoadAddress(addr, bytes, memorySize);
return addr;
}
- void checkLoadAddress(Address addr, Index bytes) {
+ void checkLoadAddress(Address addr, Index bytes, Address memorySize) {
Address memorySizeBytes = memorySize * Memory::kPageSize;
trapIfGt(addr, memorySizeBytes - bytes, "highest > memory");
}
- void checkAtomicAddress(Address addr, Index bytes) {
- checkLoadAddress(addr, bytes);
+ void checkAtomicAddress(Address addr, Index bytes, Address memorySize) {
+ checkLoadAddress(addr, bytes, memorySize);
// Unaligned atomics trap.
if (bytes > 1) {
if (addr & (bytes - 1)) {
@@ -3597,8 +3707,9 @@ protected:
}
}
- Literal doAtomicLoad(Address addr, Index bytes, Type type) {
- checkAtomicAddress(addr, bytes);
+ Literal doAtomicLoad(
+ Address addr, Index bytes, Type type, Name memoryName, Address memorySize) {
+ checkAtomicAddress(addr, bytes, memorySize);
Const ptr;
ptr.value = Literal(int32_t(addr));
ptr.type = Type::i32;
@@ -3611,11 +3722,16 @@ protected:
load.isAtomic = true; // understatement
load.ptr = &ptr;
load.type = type;
- return externalInterface->load(&load, addr);
+ load.memory = memoryName;
+ return externalInterface->load(&load, addr, memoryName);
}
- void doAtomicStore(Address addr, Index bytes, Literal toStore) {
- checkAtomicAddress(addr, bytes);
+ void doAtomicStore(Address addr,
+ Index bytes,
+ Literal toStore,
+ Name memoryName,
+ Address memorySize) {
+ checkAtomicAddress(addr, bytes, memorySize);
Const ptr;
ptr.value = Literal(int32_t(addr));
ptr.type = Type::i32;
@@ -3629,7 +3745,8 @@ protected:
store.ptr = &ptr;
store.value = &value;
store.valueType = value.type;
- return externalInterface->store(&store, addr, toStore);
+ store.memory = memoryName;
+ return externalInterface->store(&store, addr, toStore, memoryName);
}
ExternalInterface* externalInterface;
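For reference, a standalone sketch of the address computation and bounds checks above, now parameterized by the memory size in pages; std::runtime_error stands in for trapIfGt, and the names are illustrative:

#include <cstdint>
#include <stdexcept>

// Computes ptr + offset and traps (throws) if an access of `bytes` bytes at
// that address would fall outside a memory of memorySizePages pages.
uint64_t finalAddress(uint64_t ptr,
                      uint64_t offset,
                      uint32_t bytes,
                      uint64_t memorySizePages) {
  const uint64_t kPageSize = 65536;
  uint64_t memorySizeBytes = memorySizePages * kPageSize;
  if (offset > memorySizeBytes) {
    throw std::runtime_error("offset > memory");
  }
  if (ptr > memorySizeBytes - offset) {
    throw std::runtime_error("final > memory");
  }
  uint64_t addr = ptr + offset;
  if (bytes > memorySizeBytes || addr > memorySizeBytes - bytes) {
    throw std::runtime_error("highest > memory");
  }
  return addr;
}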
diff --git a/src/wasm-s-parser.h b/src/wasm-s-parser.h
index 64f764721..c91c01842 100644
--- a/src/wasm-s-parser.h
+++ b/src/wasm-s-parser.h
@@ -126,6 +126,7 @@ class SExpressionWasmBuilder {
std::vector<Name> functionNames;
std::vector<Name> tableNames;
+ std::vector<Name> memoryNames;
std::vector<Name> globalNames;
std::vector<Name> tagNames;
int functionCounter = 0;
@@ -158,6 +159,7 @@ private:
void preParseFunctionType(Element& s);
bool isImport(Element& curr);
void preParseImports(Element& curr);
+ void preParseMemory(Element& curr);
void parseModuleElement(Element& curr);
// function parsing state
@@ -168,10 +170,15 @@ private:
Name getFunctionName(Element& s);
Name getTableName(Element& s);
+ Name getMemoryName(Element& s);
Name getGlobalName(Element& s);
Name getTagName(Element& s);
void parseStart(Element& s) { wasm.addStart(getFunctionName(*s[1])); }
+ Name getMemoryNameAtIdx(Index i);
+ bool isMemory64(Name memoryName);
+ bool hasMemoryIdx(Element& s, Index defaultSize, Index i);
+
// returns the next index in s
size_t parseFunctionNames(Element& s, Name& name, Name& exportName);
void parseFunction(Element& s, bool preParseImport = false);
@@ -311,8 +318,12 @@ private:
// Helper functions
Type parseOptionalResultType(Element& s, Index& i);
- Index parseMemoryLimits(Element& s, Index i);
- Index parseMemoryIndex(Element& s, Index i);
+ Index parseMemoryLimits(Element& s, Index i, std::unique_ptr<Memory>& memory);
+ Index parseMemoryIndex(Element& s, Index i, std::unique_ptr<Memory>& memory);
+ Index parseMemoryForInstruction(const std::string& instrName,
+ Memory& memory,
+ Element& s,
+ Index i);
std::vector<Type> parseParamOrLocal(Element& s);
std::vector<NameType> parseParamOrLocal(Element& s, size_t& localIndex);
std::vector<Type> parseResults(Element& s);
@@ -326,12 +337,7 @@ private:
void stringToBinary(const char* input, size_t size, std::vector<char>& data);
void parseMemory(Element& s, bool preParseImport = false);
void parseData(Element& s);
- void parseInnerData(Element& s,
- Index i,
- Name name,
- bool hasExplicitName,
- Expression* offset,
- bool isPassive);
+ void parseInnerData(Element& s, Index i, std::unique_ptr<DataSegment>& seg);
void parseExport(Element& s);
void parseImport(Element& s);
void parseGlobal(Element& s, bool preParseImport = false);
diff --git a/src/wasm-stack.h b/src/wasm-stack.h
index 2a007739d..28c3a0836 100644
--- a/src/wasm-stack.h
+++ b/src/wasm-stack.h
@@ -122,7 +122,10 @@ public:
MappedLocals mappedLocals;
private:
- void emitMemoryAccess(size_t alignment, size_t bytes, uint32_t offset);
+ void emitMemoryAccess(size_t alignment,
+ size_t bytes,
+ uint32_t offset,
+ Name memory);
int32_t getBreakIndex(Name name);
WasmBinaryWriter& parent;
diff --git a/src/wasm-traversal.h b/src/wasm-traversal.h
index 7c10be3a9..a77356a4c 100644
--- a/src/wasm-traversal.h
+++ b/src/wasm-traversal.h
@@ -258,7 +258,9 @@ struct Walker : public VisitorType {
for (auto& curr : module->elementSegments) {
self->walkElementSegment(curr.get());
}
- self->walkMemory(&module->memory);
+ for (auto& curr : module->memories) {
+ self->walkMemory(curr.get());
+ }
for (auto& curr : module->dataSegments) {
self->walkDataSegment(curr.get());
}
diff --git a/src/wasm.h b/src/wasm.h
index 04d4391a2..62a63e493 100644
--- a/src/wasm.h
+++ b/src/wasm.h
@@ -969,6 +969,7 @@ public:
Address align;
bool isAtomic;
Expression* ptr;
+ Name memory;
// type must be set during creation, cannot be inferred
@@ -987,6 +988,7 @@ public:
Expression* ptr;
Expression* value;
Type valueType;
+ Name memory;
void finalize();
};
@@ -1001,6 +1003,7 @@ public:
Address offset;
Expression* ptr;
Expression* value;
+ Name memory;
void finalize();
};
@@ -1015,6 +1018,7 @@ public:
Expression* ptr;
Expression* expected;
Expression* replacement;
+ Name memory;
void finalize();
};
@@ -1029,6 +1033,7 @@ public:
Expression* expected;
Expression* timeout;
Type expectedType;
+ Name memory;
void finalize();
};
@@ -1041,6 +1046,7 @@ public:
Address offset;
Expression* ptr;
Expression* notifyCount;
+ Name memory;
void finalize();
};
@@ -1129,6 +1135,7 @@ public:
Address offset;
Address align;
Expression* ptr;
+ Name memory;
Index getMemBytes();
void finalize();
@@ -1146,6 +1153,7 @@ public:
uint8_t index;
Expression* ptr;
Expression* vec;
+ Name memory;
bool isStore();
bool isLoad() { return !isStore(); }
@@ -1162,6 +1170,7 @@ public:
Expression* dest;
Expression* offset;
Expression* size;
+ Name memory;
void finalize();
};
@@ -1184,6 +1193,8 @@ public:
Expression* dest;
Expression* source;
Expression* size;
+ Name destMemory;
+ Name sourceMemory;
void finalize();
};
@@ -1196,6 +1207,7 @@ public:
Expression* dest;
Expression* value;
Expression* size;
+ Name memory;
void finalize();
};
@@ -1279,6 +1291,7 @@ public:
MemorySize(MixedArena& allocator) : MemorySize() {}
Type ptrType = Type::i32;
+ Name memory;
void make64();
void finalize();
@@ -1291,6 +1304,7 @@ public:
Expression* delta = nullptr;
Type ptrType = Type::i32;
+ Name memory;
void make64();
void finalize();
@@ -2029,6 +2043,7 @@ public:
class DataSegment : public Named {
public:
+ Name memory;
bool isPassive = false;
Expression* offset = nullptr;
std::vector<char> data; // TODO: optimize
@@ -2042,18 +2057,15 @@ public:
static const Address::address32_t kMaxSize32 =
(uint64_t(4) * 1024 * 1024 * 1024) / kPageSize;
- bool exists = false;
Address initial = 0; // sizes are in pages
Address max = kMaxSize32;
bool shared = false;
Type indexType = Type::i32;
- Memory() { name = Name::fromInt(0); }
bool hasMax() { return max != kUnlimitedSize; }
bool is64() { return indexType == Type::i64; }
void clear() {
- exists = false;
name = "";
initial = 0;
max = kMaxSize32;
@@ -2100,10 +2112,10 @@ public:
std::vector<std::unique_ptr<Global>> globals;
std::vector<std::unique_ptr<Tag>> tags;
std::vector<std::unique_ptr<ElementSegment>> elementSegments;
+ std::vector<std::unique_ptr<Memory>> memories;
std::vector<std::unique_ptr<DataSegment>> dataSegments;
std::vector<std::unique_ptr<Table>> tables;
- Memory memory;
Name start;
std::vector<UserSection> userSections;
@@ -2135,6 +2147,7 @@ private:
std::unordered_map<Name, Export*> exportsMap;
std::unordered_map<Name, Function*> functionsMap;
std::unordered_map<Name, Table*> tablesMap;
+ std::unordered_map<Name, Memory*> memoriesMap;
std::unordered_map<Name, ElementSegment*> elementSegmentsMap;
std::unordered_map<Name, DataSegment*> dataSegmentsMap;
std::unordered_map<Name, Global*> globalsMap;
@@ -2147,12 +2160,14 @@ public:
Function* getFunction(Name name);
Table* getTable(Name name);
ElementSegment* getElementSegment(Name name);
+ Memory* getMemory(Name name);
DataSegment* getDataSegment(Name name);
Global* getGlobal(Name name);
Tag* getTag(Name name);
Export* getExportOrNull(Name name);
Table* getTableOrNull(Name name);
+ Memory* getMemoryOrNull(Name name);
ElementSegment* getElementSegmentOrNull(Name name);
DataSegment* getDataSegmentOrNull(Name name);
Function* getFunctionOrNull(Name name);
@@ -2168,6 +2183,7 @@ public:
Function* addFunction(std::unique_ptr<Function>&& curr);
Table* addTable(std::unique_ptr<Table>&& curr);
ElementSegment* addElementSegment(std::unique_ptr<ElementSegment>&& curr);
+ Memory* addMemory(std::unique_ptr<Memory>&& curr);
DataSegment* addDataSegment(std::unique_ptr<DataSegment>&& curr);
Global* addGlobal(std::unique_ptr<Global>&& curr);
Tag* addTag(std::unique_ptr<Tag>&& curr);
@@ -2178,6 +2194,7 @@ public:
void removeFunction(Name name);
void removeTable(Name name);
void removeElementSegment(Name name);
+ void removeMemory(Name name);
void removeDataSegment(Name name);
void removeGlobal(Name name);
void removeTag(Name name);
@@ -2186,10 +2203,12 @@ public:
void removeFunctions(std::function<bool(Function*)> pred);
void removeTables(std::function<bool(Table*)> pred);
void removeElementSegments(std::function<bool(ElementSegment*)> pred);
+ void removeMemories(std::function<bool(Memory*)> pred);
void removeDataSegments(std::function<bool(DataSegment*)> pred);
void removeGlobals(std::function<bool(Global*)> pred);
void removeTags(std::function<bool(Tag*)> pred);
+ void updateDataSegmentsMap();
void updateMaps();
void clearDebugInfo();
diff --git a/src/wasm/wasm-binary.cpp b/src/wasm/wasm-binary.cpp
index e4b942203..7008d185c 100644
--- a/src/wasm/wasm-binary.cpp
+++ b/src/wasm/wasm-binary.cpp
@@ -52,7 +52,7 @@ void WasmBinaryWriter::write() {
writeImports();
writeFunctionSignatures();
writeTableDeclarations();
- writeMemory();
+ writeMemories();
writeTags();
if (wasm->features.hasStrings()) {
writeStrings();
@@ -203,18 +203,21 @@ void WasmBinaryWriter::writeStart() {
finishSection(start);
}
-void WasmBinaryWriter::writeMemory() {
- if (!wasm->memory.exists || wasm->memory.imported()) {
+void WasmBinaryWriter::writeMemories() {
+ if (wasm->memories.empty()) {
return;
}
- BYN_TRACE("== writeMemory\n");
+ BYN_TRACE("== writeMemories\n");
auto start = startSection(BinaryConsts::Section::Memory);
- o << U32LEB(1); // Define 1 memory
- writeResizableLimits(wasm->memory.initial,
- wasm->memory.max,
- wasm->memory.hasMax(),
- wasm->memory.shared,
- wasm->memory.is64());
+ auto num = importInfo->getNumDefinedMemories();
+ o << U32LEB(num);
+ ModuleUtils::iterDefinedMemories(*wasm, [&](Memory* memory) {
+ writeResizableLimits(memory->initial,
+ memory->max,
+ memory->hasMax(),
+ memory->shared,
+ memory->is64());
+ });
finishSection(start);
}
@@ -333,16 +336,16 @@ void WasmBinaryWriter::writeImports() {
o << uint8_t(0); // Reserved 'attribute' field. Always 0.
o << U32LEB(getTypeIndex(tag->sig));
});
- if (wasm->memory.imported()) {
+ ModuleUtils::iterImportedMemories(*wasm, [&](Memory* memory) {
BYN_TRACE("write one memory\n");
- writeImportHeader(&wasm->memory);
+ writeImportHeader(memory);
o << U32LEB(int32_t(ExternalKind::Memory));
- writeResizableLimits(wasm->memory.initial,
- wasm->memory.max,
- wasm->memory.hasMax(),
- wasm->memory.shared,
- wasm->memory.is64());
- }
+ writeResizableLimits(memory->initial,
+ memory->max,
+ memory->hasMax(),
+ memory->shared,
+ memory->is64());
+ });
ModuleUtils::iterImportedTables(*wasm, [&](Table* table) {
BYN_TRACE("write one table\n");
writeImportHeader(table);
@@ -566,8 +569,7 @@ void WasmBinaryWriter::writeExports() {
o << U32LEB(getTableIndex(curr->value));
break;
case ExternalKind::Memory:
- // TODO: fix with multi-memory
- o << U32LEB(0);
+ o << U32LEB(getMemoryIndex(curr->value));
break;
case ExternalKind::Global:
o << U32LEB(getGlobalIndex(curr->value));
@@ -629,6 +631,12 @@ uint32_t WasmBinaryWriter::getTableIndex(Name name) const {
return it->second;
}
+uint32_t WasmBinaryWriter::getMemoryIndex(Name name) const {
+ auto it = indexes.memoryIndexes.find(name);
+ assert(it != indexes.memoryIndexes.end());
+ return it->second;
+}
+
uint32_t WasmBinaryWriter::getGlobalIndex(Name name) const {
auto it = indexes.globalIndexes.find(name);
assert(it != indexes.globalIndexes.end());
@@ -930,12 +938,28 @@ void WasmBinaryWriter::writeNames() {
}
// memory names
- if (wasm->memory.exists && wasm->memory.hasExplicitName) {
- auto substart =
- startSubsection(BinaryConsts::UserSections::Subsection::NameMemory);
- o << U32LEB(1) << U32LEB(0); // currently exactly 1 memory at index 0
- writeEscapedName(wasm->memory.name.str);
- finishSubsection(substart);
+ {
+ std::vector<std::pair<Index, Memory*>> memoriesWithNames;
+ Index checked = 0;
+ auto check = [&](Memory* curr) {
+ if (curr->hasExplicitName) {
+ memoriesWithNames.push_back({checked, curr});
+ }
+ checked++;
+ };
+ ModuleUtils::iterImportedMemories(*wasm, check);
+ ModuleUtils::iterDefinedMemories(*wasm, check);
+ assert(checked == indexes.memoryIndexes.size());
+ if (memoriesWithNames.size() > 0) {
+ auto substart =
+ startSubsection(BinaryConsts::UserSections::Subsection::NameMemory);
+ o << U32LEB(memoriesWithNames.size());
+ for (auto& [index, memory] : memoriesWithNames) {
+ o << U32LEB(index);
+ writeEscapedName(memory->name.str);
+ }
+ finishSubsection(substart);
+ }
}
// global names
@@ -990,7 +1014,7 @@ void WasmBinaryWriter::writeNames() {
}
// data segment names
- if (wasm->memory.exists) {
+ if (!wasm->memories.empty()) {
Index count = 0;
for (auto& seg : wasm->dataSegments) {
if (seg->hasExplicitName) {
@@ -1547,7 +1571,7 @@ void WasmBinaryBuilder::read() {
readStart();
break;
case BinaryConsts::Section::Memory:
- readMemory();
+ readMemories();
break;
case BinaryConsts::Section::Type:
readTypes();
@@ -1761,10 +1785,6 @@ int64_t WasmBinaryBuilder::getS64LEB() {
return ret.value;
}
-uint64_t WasmBinaryBuilder::getUPtrLEB() {
- return wasm.memory.is64() ? getU64LEB() : getU32LEB();
-}
-
bool WasmBinaryBuilder::getBasicType(int32_t code, Type& out) {
switch (code) {
case BinaryConsts::EncodedType::i32:
@@ -1963,24 +1983,20 @@ void WasmBinaryBuilder::readStart() {
startIndex = getU32LEB();
}
-void WasmBinaryBuilder::readMemory() {
- BYN_TRACE("== readMemory\n");
- auto numMemories = getU32LEB();
- if (!numMemories) {
- return;
- }
- if (numMemories != 1) {
- throwError("Must be exactly 1 memory");
- }
- if (wasm.memory.exists) {
- throwError("Memory cannot be both imported and defined");
+void WasmBinaryBuilder::readMemories() {
+ BYN_TRACE("== readMemories\n");
+ auto num = getU32LEB();
+ BYN_TRACE("num: " << num << std::endl);
+ for (size_t i = 0; i < num; i++) {
+ BYN_TRACE("read one\n");
+ auto memory = Builder::makeMemory(Name::fromInt(i));
+ getResizableLimits(memory->initial,
+ memory->max,
+ memory->shared,
+ memory->indexType,
+ Memory::kUnlimitedSize);
+ memories.push_back(std::move(memory));
}
- wasm.memory.exists = true;
- getResizableLimits(wasm.memory.initial,
- wasm.memory.max,
- wasm.memory.shared,
- wasm.memory.indexType,
- Memory::kUnlimitedSize);
}
void WasmBinaryBuilder::readTypes() {
@@ -2171,6 +2187,13 @@ Name WasmBinaryBuilder::getTableName(Index index) {
return wasm.tables[index]->name;
}
+Name WasmBinaryBuilder::getMemoryName(Index index) {
+ if (index >= wasm.memories.size()) {
+ throwError("invalid memory index");
+ }
+ return wasm.memories[index]->name;
+}
+
Name WasmBinaryBuilder::getGlobalName(Index index) {
if (index >= wasm.globals.size()) {
throwError("invalid global index");
@@ -2270,15 +2293,16 @@ void WasmBinaryBuilder::readImports() {
}
case ExternalKind::Memory: {
Name name(std::string("mimport$") + std::to_string(memoryCounter++));
- wasm.memory.module = module;
- wasm.memory.base = base;
- wasm.memory.name = name;
- wasm.memory.exists = true;
- getResizableLimits(wasm.memory.initial,
- wasm.memory.max,
- wasm.memory.shared,
- wasm.memory.indexType,
+ auto memory = builder.makeMemory(name);
+ memory->module = module;
+ memory->base = base;
+ getResizableLimits(memory->initial,
+ memory->max,
+ memory->shared,
+ memory->indexType,
Memory::kUnlimitedSize);
+ memoryImports.push_back(memory.get());
+ wasm.addMemory(std::move(memory));
break;
}
case ExternalKind::Global: {
@@ -2923,6 +2947,9 @@ void WasmBinaryBuilder::processNames() {
for (auto& segment : elementSegments) {
wasm.addElementSegment(std::move(segment));
}
+ for (auto& memory : memories) {
+ wasm.addMemory(std::move(memory));
+ }
for (auto& segment : dataSegments) {
wasm.addDataSegment(std::move(segment));
}
@@ -2943,7 +2970,7 @@ void WasmBinaryBuilder::processNames() {
curr->value = getTableName(index);
break;
case ExternalKind::Memory:
- curr->value = wasm.memory.name;
+ curr->value = getMemoryName(index);
break;
case ExternalKind::Global:
curr->value = getGlobalName(index);
@@ -2969,6 +2996,12 @@ void WasmBinaryBuilder::processNames() {
}
}
+ for (auto& [index, refs] : memoryRefs) {
+ for (auto ref : refs) {
+ *ref = getMemoryName(index);
+ }
+ }
+
for (auto& [index, refs] : globalRefs) {
for (auto* ref : refs) {
*ref = getGlobalName(index);
@@ -2998,12 +3031,21 @@ void WasmBinaryBuilder::readDataSegments() {
}
curr->setName(Name::fromInt(i), false);
curr->isPassive = flags & BinaryConsts::IsPassive;
+ Index memIdx = 0;
if (flags & BinaryConsts::HasIndex) {
- auto memIndex = getU32LEB();
- if (memIndex != 0) {
- throwError("nonzero memory index");
- }
+ memIdx = getU32LEB();
+ }
+ Memory* memory = nullptr;
+ Index numMemoryImports = memoryImports.size();
+ if (memIdx < numMemoryImports) {
+ memory = memoryImports[memIdx];
+ } else if (memIdx - numMemoryImports < memories.size()) {
+ memory = memories[memIdx - numMemoryImports].get();
}
+ if (!memory) {
+ throwError("Memory index out of range while reading data segments.");
+ }
+ curr->memory = memory->name;
if (!curr->isPassive) {
curr->offset = readExpression();
}
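The index resolution above follows the convention sketched below: imported memories occupy the low indices and module-defined memories follow them. The same lookup is reused for memarg memory indices and the name section (Memory here is a placeholder type, not Binaryen's declaration):

#include <cstddef>
#include <vector>

struct Memory; // placeholder for the real memory type

// Returns the memory a binary index refers to, or nullptr if the index is
// out of range (which the reader reports as an error).
inline Memory* resolveMemoryIndex(const std::vector<Memory*>& memoryImports,
                                  const std::vector<Memory*>& definedMemories,
                                  size_t index) {
  if (index < memoryImports.size()) {
    return memoryImports[index];
  }
  index -= memoryImports.size();
  if (index < definedMemories.size()) {
    return definedMemories[index];
  }
  return nullptr;
}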
@@ -3333,11 +3375,16 @@ void WasmBinaryBuilder::readNames(size_t payloadLen) {
}
} else if (nameType == BinaryConsts::UserSections::Subsection::NameMemory) {
auto num = getU32LEB();
+ NameProcessor processor;
for (size_t i = 0; i < num; i++) {
auto index = getU32LEB();
auto rawName = getInlineString();
- if (index == 0) {
- wasm.memory.setExplicitName(escape(rawName));
+ auto name = processor.process(rawName);
+ auto numMemoryImports = memoryImports.size();
+ if (index < numMemoryImports) {
+ memoryImports[index]->setExplicitName(name);
+ } else if (index - numMemoryImports < memories.size()) {
+ memories[index - numMemoryImports]->setExplicitName(name);
} else {
std::cerr << "warning: memory index out of bounds in name section, "
"memory subsection: "
@@ -3713,18 +3760,12 @@ BinaryConsts::ASTNodes WasmBinaryBuilder::readExpression(Expression*& curr) {
break;
case BinaryConsts::MemorySize: {
auto size = allocator.alloc<MemorySize>();
- if (wasm.memory.is64()) {
- size->make64();
- }
curr = size;
visitMemorySize(size);
break;
}
case BinaryConsts::MemoryGrow: {
auto grow = allocator.alloc<MemoryGrow>();
- if (wasm.memory.is64()) {
- grow->make64();
- }
curr = grow;
visitMemoryGrow(grow);
break;
@@ -4304,13 +4345,39 @@ void WasmBinaryBuilder::visitGlobalSet(GlobalSet* curr) {
curr->finalize();
}
-void WasmBinaryBuilder::readMemoryAccess(Address& alignment, Address& offset) {
+Index WasmBinaryBuilder::readMemoryAccess(Address& alignment, Address& offset) {
auto rawAlignment = getU32LEB();
- if (rawAlignment > 4) {
+ bool hasMemIdx = false;
+ Index memIdx = 0;
+ // Check bit 6 in the alignment to know whether a memory index is present per:
+ // https://github.com/WebAssembly/multi-memory/blob/main/proposals/multi-memory/Overview.md
+  if (rawAlignment & (1 << 6)) {
+    hasMemIdx = true;
+    // Clear the bit before parsing the alignment value.
+    rawAlignment = rawAlignment & ~(1 << 6);
+ }
+
+ if (rawAlignment > 8) {
throwError("Alignment must be of a reasonable size");
}
+
alignment = Bits::pow2(rawAlignment);
- offset = getUPtrLEB();
+ if (hasMemIdx) {
+ memIdx = getU32LEB();
+ }
+ Memory* memory = nullptr;
+ auto numMemoryImports = memoryImports.size();
+ if (memIdx < numMemoryImports) {
+ memory = memoryImports[memIdx];
+ } else if (memIdx - numMemoryImports < memories.size()) {
+ memory = memories[memIdx - numMemoryImports].get();
+ }
+ if (!memory) {
+ throwError("Memory index out of range while reading memory alignment.");
+ }
+ offset = memory->indexType == Type::i32 ? getU32LEB() : getU64LEB();
+
+ return memIdx;
}
bool WasmBinaryBuilder::maybeVisitLoad(Expression*& out,
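A sketch of the memarg decoding above, following the multi-memory proposal: bit 6 of the alignment immediate flags a trailing memory index, and the offset is read at the width of the selected memory's index type. Here readLEB32/readLEB64 are hypothetical reader callbacks and the caller supplies whether that memory is 64-bit:

#include <cstdint>
#include <stdexcept>

struct MemArg {
  uint32_t alignLog2;
  uint32_t memoryIndex;
  uint64_t offset;
};

template<typename Read32, typename Read64>
MemArg readMemArg(Read32 readLEB32, Read64 readLEB64, bool memoryIs64) {
  MemArg out{};
  uint32_t rawAlignment = readLEB32();
  // Bit 6 signals that an explicit memory index follows the alignment.
  bool hasMemIdx = rawAlignment & (1u << 6);
  rawAlignment &= ~(1u << 6);
  if (rawAlignment > 8) {
    throw std::runtime_error("alignment must be of a reasonable size");
  }
  out.alignLog2 = rawAlignment;
  out.memoryIndex = hasMemIdx ? readLEB32() : 0;
  // A 64-bit memory uses a 64-bit offset immediate.
  out.offset = memoryIs64 ? readLEB64() : readLEB32();
  return out;
}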
@@ -4445,7 +4512,8 @@ bool WasmBinaryBuilder::maybeVisitLoad(Expression*& out,
}
curr->isAtomic = isAtomic;
- readMemoryAccess(curr->align, curr->offset);
+ Index memIdx = readMemoryAccess(curr->align, curr->offset);
+ memoryRefs[memIdx].push_back(&curr->memory);
curr->ptr = popNonVoidExpression();
curr->finalize();
out = curr;
@@ -4550,7 +4618,8 @@ bool WasmBinaryBuilder::maybeVisitStore(Expression*& out,
curr->isAtomic = isAtomic;
BYN_TRACE("zz node: Store\n");
- readMemoryAccess(curr->align, curr->offset);
+ Index memIdx = readMemoryAccess(curr->align, curr->offset);
+ memoryRefs[memIdx].push_back(&curr->memory);
curr->value = popNonVoidExpression();
curr->ptr = popNonVoidExpression();
curr->finalize();
@@ -4610,7 +4679,8 @@ bool WasmBinaryBuilder::maybeVisitAtomicRMW(Expression*& out, uint8_t code) {
BYN_TRACE("zz node: AtomicRMW\n");
Address readAlign;
- readMemoryAccess(readAlign, curr->offset);
+ Index memIdx = readMemoryAccess(readAlign, curr->offset);
+ memoryRefs[memIdx].push_back(&curr->memory);
if (readAlign != curr->bytes) {
throwError("Align of AtomicRMW must match size");
}
@@ -4662,7 +4732,8 @@ bool WasmBinaryBuilder::maybeVisitAtomicCmpxchg(Expression*& out,
BYN_TRACE("zz node: AtomicCmpxchg\n");
Address readAlign;
- readMemoryAccess(readAlign, curr->offset);
+ Index memIdx = readMemoryAccess(readAlign, curr->offset);
+ memoryRefs[memIdx].push_back(&curr->memory);
if (readAlign != curr->bytes) {
throwError("Align of AtomicCpxchg must match size");
}
@@ -4697,7 +4768,8 @@ bool WasmBinaryBuilder::maybeVisitAtomicWait(Expression*& out, uint8_t code) {
curr->expected = popNonVoidExpression();
curr->ptr = popNonVoidExpression();
Address readAlign;
- readMemoryAccess(readAlign, curr->offset);
+ Index memIdx = readMemoryAccess(readAlign, curr->offset);
+ memoryRefs[memIdx].push_back(&curr->memory);
if (readAlign != curr->expectedType.getByteSize()) {
throwError("Align of AtomicWait must match size");
}
@@ -4717,7 +4789,8 @@ bool WasmBinaryBuilder::maybeVisitAtomicNotify(Expression*& out, uint8_t code) {
curr->notifyCount = popNonVoidExpression();
curr->ptr = popNonVoidExpression();
Address readAlign;
- readMemoryAccess(readAlign, curr->offset);
+ Index memIdx = readMemoryAccess(readAlign, curr->offset);
+ memoryRefs[memIdx].push_back(&curr->memory);
if (readAlign != curr->type.getByteSize()) {
throwError("Align of AtomicNotify must match size");
}
@@ -5048,10 +5121,9 @@ bool WasmBinaryBuilder::maybeVisitMemoryInit(Expression*& out, uint32_t code) {
curr->offset = popNonVoidExpression();
curr->dest = popNonVoidExpression();
curr->segment = getU32LEB();
- if (getInt8() != 0) {
- throwError("Unexpected nonzero memory index");
- }
+ Index memIdx = getU32LEB();
curr->finalize();
+ memoryRefs[memIdx].push_back(&curr->memory);
out = curr;
return true;
}
@@ -5075,10 +5147,11 @@ bool WasmBinaryBuilder::maybeVisitMemoryCopy(Expression*& out, uint32_t code) {
curr->size = popNonVoidExpression();
curr->source = popNonVoidExpression();
curr->dest = popNonVoidExpression();
- if (getInt8() != 0 || getInt8() != 0) {
- throwError("Unexpected nonzero memory index");
- }
+ Index destIdx = getU32LEB();
+ Index sourceIdx = getU32LEB();
curr->finalize();
+ memoryRefs[destIdx].push_back(&curr->destMemory);
+ memoryRefs[sourceIdx].push_back(&curr->sourceMemory);
out = curr;
return true;
}
@@ -5091,10 +5164,9 @@ bool WasmBinaryBuilder::maybeVisitMemoryFill(Expression*& out, uint32_t code) {
curr->size = popNonVoidExpression();
curr->value = popNonVoidExpression();
curr->dest = popNonVoidExpression();
- if (getInt8() != 0) {
- throwError("Unexpected nonzero memory index");
- }
+ Index memIdx = getU32LEB();
curr->finalize();
+ memoryRefs[memIdx].push_back(&curr->memory);
out = curr;
return true;
}
@@ -6038,7 +6110,8 @@ bool WasmBinaryBuilder::maybeVisitSIMDStore(Expression*& out, uint32_t code) {
auto* curr = allocator.alloc<Store>();
curr->bytes = 16;
curr->valueType = Type::v128;
- readMemoryAccess(curr->align, curr->offset);
+ Index memIdx = readMemoryAccess(curr->align, curr->offset);
+ memoryRefs[memIdx].push_back(&curr->memory);
curr->isAtomic = false;
curr->value = popNonVoidExpression();
curr->ptr = popNonVoidExpression();
@@ -6277,7 +6350,8 @@ bool WasmBinaryBuilder::maybeVisitSIMDLoad(Expression*& out, uint32_t code) {
auto* curr = allocator.alloc<Load>();
curr->type = Type::v128;
curr->bytes = 16;
- readMemoryAccess(curr->align, curr->offset);
+ Index memIdx = readMemoryAccess(curr->align, curr->offset);
+ memoryRefs[memIdx].push_back(&curr->memory);
curr->isAtomic = false;
curr->ptr = popNonVoidExpression();
curr->finalize();
@@ -6337,7 +6411,8 @@ bool WasmBinaryBuilder::maybeVisitSIMDLoad(Expression*& out, uint32_t code) {
default:
return false;
}
- readMemoryAccess(curr->align, curr->offset);
+ Index memIdx = readMemoryAccess(curr->align, curr->offset);
+ memoryRefs[memIdx].push_back(&curr->memory);
curr->ptr = popNonVoidExpression();
curr->finalize();
out = curr;
@@ -6386,7 +6461,8 @@ bool WasmBinaryBuilder::maybeVisitSIMDLoadStoreLane(Expression*& out,
}
auto* curr = allocator.alloc<SIMDLoadStoreLane>();
curr->op = op;
- readMemoryAccess(curr->align, curr->offset);
+ Index memIdx = readMemoryAccess(curr->align, curr->offset);
+ memoryRefs[memIdx].push_back(&curr->memory);
curr->index = getLaneIndex(lanes);
curr->vec = popNonVoidExpression();
curr->ptr = popNonVoidExpression();
@@ -6427,21 +6503,17 @@ void WasmBinaryBuilder::visitReturn(Return* curr) {
void WasmBinaryBuilder::visitMemorySize(MemorySize* curr) {
BYN_TRACE("zz node: MemorySize\n");
- auto reserved = getU32LEB();
- if (reserved != 0) {
- throwError("Invalid reserved field on memory.size");
- }
+ Index memIdx = getU32LEB();
curr->finalize();
+ memoryRefs[memIdx].push_back(&curr->memory);
}
void WasmBinaryBuilder::visitMemoryGrow(MemoryGrow* curr) {
BYN_TRACE("zz node: MemoryGrow\n");
curr->delta = popNonVoidExpression();
- auto reserved = getU32LEB();
- if (reserved != 0) {
- throwError("Invalid reserved field on memory.grow");
- }
+ Index memIdx = getU32LEB();
curr->finalize();
+ memoryRefs[memIdx].push_back(&curr->memory);
}
void WasmBinaryBuilder::visitNop(Nop* curr) { BYN_TRACE("zz node: Nop\n"); }
@@ -7270,7 +7342,6 @@ void WasmBinaryBuilder::visitRefAs(RefAs* curr, uint8_t code) {
}
curr->finalize();
}
-
void WasmBinaryBuilder::throwError(std::string text) {
throw ParseException(text, 0, pos);
}
diff --git a/src/wasm/wasm-debug.cpp b/src/wasm/wasm-debug.cpp
index 23c2dc938..8de691272 100644
--- a/src/wasm/wasm-debug.cpp
+++ b/src/wasm/wasm-debug.cpp
@@ -1065,7 +1065,8 @@ void writeDWARFSections(Module& wasm, const BinaryLocations& newLocations) {
updateDebugLines(data, locationUpdater);
- updateCompileUnits(info, data, locationUpdater, wasm.memory.is64());
+ bool is64 = wasm.memories.size() > 0 ? wasm.memories[0]->is64() : false;
+ updateCompileUnits(info, data, locationUpdater, is64);
updateRanges(data, locationUpdater);
diff --git a/src/wasm/wasm-s-parser.cpp b/src/wasm/wasm-s-parser.cpp
index 382ca741d..55551ef58 100644
--- a/src/wasm/wasm-s-parser.cpp
+++ b/src/wasm/wasm-s-parser.cpp
@@ -378,6 +378,7 @@ SExpressionWasmBuilder::SExpressionWasmBuilder(Module& wasm,
auto& s = *module[j];
preParseFunctionType(s);
preParseImports(s);
+ preParseMemory(s);
if (elementStartsWith(s, FUNC) && !isImport(s)) {
implementedFunctions++;
}
@@ -423,20 +424,27 @@ void SExpressionWasmBuilder::preParseImports(Element& curr) {
}
}
+void SExpressionWasmBuilder::preParseMemory(Element& curr) {
+ IString id = curr[0]->str();
+ if (id == MEMORY && !isImport(curr)) {
+ parseMemory(curr);
+ }
+}
+
void SExpressionWasmBuilder::parseModuleElement(Element& curr) {
if (isImport(curr)) {
return; // already done
}
IString id = curr[0]->str();
+ if (id == MEMORY) {
+ return; // already done
+ }
if (id == START) {
return parseStart(curr);
}
if (id == FUNC) {
return parseFunction(curr);
}
- if (id == MEMORY) {
- return parseMemory(curr);
- }
if (id == DATA) {
return parseData(curr);
}
@@ -495,6 +503,31 @@ Name SExpressionWasmBuilder::getTableName(Element& s) {
}
}
+bool SExpressionWasmBuilder::isMemory64(Name memoryName) {
+ auto* memory = wasm.getMemoryOrNull(memoryName);
+ if (!memory) {
+ throw ParseException("invalid memory name in isMemory64");
+ }
+ return memory->is64();
+}
+
+Name SExpressionWasmBuilder::getMemoryNameAtIdx(Index i) {
+ if (i >= memoryNames.size()) {
+ throw ParseException("unknown memory in getMemoryName");
+ }
+ return memoryNames[i];
+}
+
+Name SExpressionWasmBuilder::getMemoryName(Element& s) {
+ if (s.dollared()) {
+ return s.str();
+ } else {
+ // index
+ size_t offset = atoi(s.str().c_str());
+ return getMemoryNameAtIdx(offset);
+ }
+}
+
Name SExpressionWasmBuilder::getGlobalName(Element& s) {
if (s.dollared()) {
return s.str();
@@ -1359,7 +1392,15 @@ Expression* SExpressionWasmBuilder::makeDrop(Element& s) {
Expression* SExpressionWasmBuilder::makeMemorySize(Element& s) {
auto ret = allocator.alloc<MemorySize>();
- if (wasm.memory.is64()) {
+ Index i = 1;
+ Name memory;
+ if (s.size() > 1) {
+ memory = getMemoryName(*s[i++]);
+ } else {
+ memory = getMemoryNameAtIdx(0);
+ }
+ ret->memory = memory;
+ if (isMemory64(memory)) {
ret->make64();
}
ret->finalize();
@@ -1368,10 +1409,18 @@ Expression* SExpressionWasmBuilder::makeMemorySize(Element& s) {
Expression* SExpressionWasmBuilder::makeMemoryGrow(Element& s) {
auto ret = allocator.alloc<MemoryGrow>();
- if (wasm.memory.is64()) {
+ Index i = 1;
+ Name memory;
+ if (s.size() > 2) {
+ memory = getMemoryName(*s[i++]);
+ } else {
+ memory = getMemoryNameAtIdx(0);
+ }
+ ret->memory = memory;
+ if (isMemory64(memory)) {
ret->make64();
}
- ret->delta = parseExpression(s[1]);
+ ret->delta = parseExpression(s[i]);
ret->finalize();
return ret;
}
@@ -1820,11 +1869,11 @@ static uint8_t parseMemBytes(const char*& s, uint8_t fallback) {
return ret;
}
-static size_t parseMemAttributes(Element& s,
+static size_t parseMemAttributes(size_t i,
+ Element& s,
Address& offset,
Address& align,
Address fallbackAlign) {
- size_t i = 1;
offset = 0;
align = fallbackAlign;
// Parse "align=X" and "offset=X" arguments, bailing out on anything else.
@@ -1884,6 +1933,17 @@ static const char* findMemExtra(const Element& s, size_t skip, bool isAtomic) {
return ret;
}
+bool SExpressionWasmBuilder::hasMemoryIdx(Element& s,
+ Index defaultSize,
+ Index i) {
+ if (s.size() > defaultSize && !s[i]->isList() &&
+ strncmp(s[i]->c_str(), "align", 5) != 0 &&
+ strncmp(s[i]->c_str(), "offset", 6) != 0) {
+ return true;
+ }
+ return false;
+}
+
Expression*
SExpressionWasmBuilder::makeLoad(Element& s, Type type, bool isAtomic) {
const char* extra = findMemExtra(*s[0], 5 /* after "type.load" */, isAtomic);
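The helper above drives the optional memory operand in the text format; a simplified standalone version, with std::string tokens standing in for s-expression elements:

#include <cstddef>
#include <string>
#include <vector>

// A token names a memory only when the instruction has more operands than its
// minimal form and the token is neither an "align=..." nor an "offset=..."
// attribute.
bool tokenNamesMemory(const std::vector<std::string>& operands,
                      size_t minimalSize,
                      size_t i) {
  if (operands.size() <= minimalSize || i >= operands.size()) {
    return false;
  }
  const std::string& tok = operands[i];
  return tok.rfind("align", 0) != 0 && tok.rfind("offset", 0) != 0;
}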
@@ -1892,7 +1952,17 @@ SExpressionWasmBuilder::makeLoad(Element& s, Type type, bool isAtomic) {
ret->type = type;
ret->bytes = parseMemBytes(extra, type.getByteSize());
ret->signed_ = extra[0] && extra[1] == 's';
- size_t i = parseMemAttributes(s, ret->offset, ret->align, ret->bytes);
+ Index i = 1;
+ Name memory;
+  // If the instruction has more operands than its default form and the next
+  // token is not an align/offset attribute, it names the memory.
+ if (hasMemoryIdx(s, 2, i)) {
+ memory = getMemoryName(*s[i++]);
+ } else {
+ memory = getMemoryNameAtIdx(0);
+ }
+ ret->memory = memory;
+ i = parseMemAttributes(i, s, ret->offset, ret->align, ret->bytes);
ret->ptr = parseExpression(s[i]);
ret->finalize();
return ret;
@@ -1905,7 +1975,17 @@ SExpressionWasmBuilder::makeStore(Element& s, Type type, bool isAtomic) {
ret->isAtomic = isAtomic;
ret->valueType = type;
ret->bytes = parseMemBytes(extra, type.getByteSize());
- size_t i = parseMemAttributes(s, ret->offset, ret->align, ret->bytes);
+ Index i = 1;
+ Name memory;
+  // If the instruction has more operands than its default form and the next
+  // token is not an align/offset attribute, it names the memory.
+ if (hasMemoryIdx(s, 3, i)) {
+ memory = getMemoryName(*s[i++]);
+ } else {
+ memory = getMemoryNameAtIdx(0);
+ }
+ ret->memory = memory;
+ i = parseMemAttributes(i, s, ret->offset, ret->align, ret->bytes);
ret->ptr = parseExpression(s[i]);
ret->value = parseExpression(s[i + 1]);
ret->finalize();
@@ -1927,7 +2007,6 @@ Expression* SExpressionWasmBuilder::makeAtomicRMWOrCmpxchg(Element& s,
}
return makeAtomicRMW(s, type, bytes, extra);
}
-
Expression* SExpressionWasmBuilder::makeAtomicRMW(Element& s,
Type type,
uint8_t bytes,
@@ -1950,8 +2029,18 @@ Expression* SExpressionWasmBuilder::makeAtomicRMW(Element& s,
} else {
throw ParseException("bad atomic rmw operator", s.line, s.col);
}
+ Index i = 1;
+ Name memory;
+  // If the instruction has more operands than its default form and the next
+  // token is not an align/offset attribute, it names the memory.
+ if (hasMemoryIdx(s, 3, i)) {
+ memory = getMemoryName(*s[i++]);
+ } else {
+ memory = getMemoryNameAtIdx(0);
+ }
+ ret->memory = memory;
Address align;
- size_t i = parseMemAttributes(s, ret->offset, align, ret->bytes);
+ i = parseMemAttributes(i, s, ret->offset, align, ret->bytes);
if (align != ret->bytes) {
throw ParseException("Align of Atomic RMW must match size", s.line, s.col);
}
@@ -1968,8 +2057,18 @@ Expression* SExpressionWasmBuilder::makeAtomicCmpxchg(Element& s,
auto ret = allocator.alloc<AtomicCmpxchg>();
ret->type = type;
ret->bytes = bytes;
+ Index i = 1;
Address align;
- size_t i = parseMemAttributes(s, ret->offset, align, ret->bytes);
+ Name memory;
+  // If the instruction has more operands than its default form and the next
+  // token is not an align/offset attribute, it names the memory.
+ if (hasMemoryIdx(s, 4, i)) {
+ memory = getMemoryName(*s[i++]);
+ } else {
+ memory = getMemoryNameAtIdx(0);
+ }
+ ret->memory = memory;
+ i = parseMemAttributes(i, s, ret->offset, align, ret->bytes);
if (align != ret->bytes) {
throw ParseException(
"Align of Atomic Cmpxchg must match size", s.line, s.col);
@@ -1994,7 +2093,17 @@ Expression* SExpressionWasmBuilder::makeAtomicWait(Element& s, Type type) {
} else {
WASM_UNREACHABLE("Invalid prefix for memory.atomic.wait");
}
- size_t i = parseMemAttributes(s, ret->offset, align, expectedAlign);
+ Index i = 1;
+ Name memory;
+  // If the instruction has more operands than its default form and the next
+  // token is not an align/offset attribute, it names the memory.
+ if (hasMemoryIdx(s, 4, i)) {
+ memory = getMemoryName(*s[i++]);
+ } else {
+ memory = getMemoryNameAtIdx(0);
+ }
+ ret->memory = memory;
+ i = parseMemAttributes(i, s, ret->offset, align, expectedAlign);
if (align != expectedAlign) {
throw ParseException(
"Align of memory.atomic.wait must match size", s.line, s.col);
@@ -2009,8 +2118,18 @@ Expression* SExpressionWasmBuilder::makeAtomicWait(Element& s, Type type) {
Expression* SExpressionWasmBuilder::makeAtomicNotify(Element& s) {
auto ret = allocator.alloc<AtomicNotify>();
ret->type = Type::i32;
+ Index i = 1;
+ Name memory;
+  // If the instruction has more operands than its default form and the next
+  // token is not an align/offset attribute, it names the memory.
+ if (hasMemoryIdx(s, 3, i)) {
+ memory = getMemoryName(*s[i++]);
+ } else {
+ memory = getMemoryNameAtIdx(0);
+ }
+ ret->memory = memory;
Address align;
- size_t i = parseMemAttributes(s, ret->offset, align, 4);
+ i = parseMemAttributes(i, s, ret->offset, align, 4);
if (align != 4) {
throw ParseException(
"Align of memory.atomic.notify must be 4", s.line, s.col);
@@ -2119,7 +2238,17 @@ Expression* SExpressionWasmBuilder::makeSIMDLoad(Element& s, SIMDLoadOp op) {
defaultAlign = 8;
break;
}
- size_t i = parseMemAttributes(s, ret->offset, ret->align, defaultAlign);
+ Index i = 1;
+ Name memory;
+  // If the instruction has more operands than its default form and the next
+  // token is not an align/offset attribute, it names the memory.
+ if (hasMemoryIdx(s, 2, i)) {
+ memory = getMemoryName(*s[i++]);
+ } else {
+ memory = getMemoryNameAtIdx(0);
+ }
+ ret->memory = memory;
+ i = parseMemAttributes(i, s, ret->offset, ret->align, defaultAlign);
ret->ptr = parseExpression(s[i]);
ret->finalize();
return ret;
@@ -2156,7 +2285,17 @@ SExpressionWasmBuilder::makeSIMDLoadStoreLane(Element& s,
default:
WASM_UNREACHABLE("Unexpected SIMDLoadStoreLane op");
}
- size_t i = parseMemAttributes(s, ret->offset, ret->align, defaultAlign);
+ Index i = 1;
+ Name memory;
+  // If the instruction has more operands than its default form and the next
+  // token is not an align/offset attribute, it names the memory.
+ if (hasMemoryIdx(s, 4, i)) {
+ memory = getMemoryName(*s[i++]);
+ } else {
+ memory = getMemoryNameAtIdx(0);
+ }
+ ret->memory = memory;
+ i = parseMemAttributes(i, s, ret->offset, ret->align, defaultAlign);
ret->index = parseLaneIndex(s[i++], lanes);
ret->ptr = parseExpression(s[i++]);
ret->vec = parseExpression(s[i]);
@@ -2166,10 +2305,18 @@ SExpressionWasmBuilder::makeSIMDLoadStoreLane(Element& s,
Expression* SExpressionWasmBuilder::makeMemoryInit(Element& s) {
auto ret = allocator.alloc<MemoryInit>();
- ret->segment = atoi(s[1]->str().c_str());
- ret->dest = parseExpression(s[2]);
- ret->offset = parseExpression(s[3]);
- ret->size = parseExpression(s[4]);
+ Index i = 1;
+ Name memory;
+ if (s.size() > 5) {
+ memory = getMemoryName(*s[i++]);
+ } else {
+ memory = getMemoryNameAtIdx(0);
+ }
+ ret->memory = memory;
+ ret->segment = atoi(s[i++]->str().c_str());
+ ret->dest = parseExpression(s[i++]);
+ ret->offset = parseExpression(s[i++]);
+ ret->size = parseExpression(s[i]);
ret->finalize();
return ret;
}
@@ -2183,18 +2330,38 @@ Expression* SExpressionWasmBuilder::makeDataDrop(Element& s) {
Expression* SExpressionWasmBuilder::makeMemoryCopy(Element& s) {
auto ret = allocator.alloc<MemoryCopy>();
- ret->dest = parseExpression(s[1]);
- ret->source = parseExpression(s[2]);
- ret->size = parseExpression(s[3]);
+ Index i = 1;
+ Name destMemory;
+ Name sourceMemory;
+ if (s.size() > 4) {
+ destMemory = getMemoryName(*s[i++]);
+ sourceMemory = getMemoryName(*s[i++]);
+ } else {
+ destMemory = getMemoryNameAtIdx(0);
+ sourceMemory = getMemoryNameAtIdx(0);
+ }
+ ret->destMemory = destMemory;
+ ret->sourceMemory = sourceMemory;
+ ret->dest = parseExpression(s[i++]);
+ ret->source = parseExpression(s[i++]);
+ ret->size = parseExpression(s[i]);
ret->finalize();
return ret;
}
Expression* SExpressionWasmBuilder::makeMemoryFill(Element& s) {
auto ret = allocator.alloc<MemoryFill>();
- ret->dest = parseExpression(s[1]);
- ret->value = parseExpression(s[2]);
- ret->size = parseExpression(s[3]);
+ Index i = 1;
+ Name memory;
+ if (s.size() > 4) {
+ memory = getMemoryName(*s[i++]);
+ } else {
+ memory = getMemoryNameAtIdx(0);
+ }
+ ret->memory = memory;
+ ret->dest = parseExpression(s[i++]);
+ ret->value = parseExpression(s[i++]);
+ ret->size = parseExpression(s[i]);
ret->finalize();
return ret;
}
@@ -2995,35 +3162,37 @@ void SExpressionWasmBuilder::stringToBinary(const char* input,
data.resize(actual);
}
-Index SExpressionWasmBuilder::parseMemoryIndex(Element& s, Index i) {
+Index SExpressionWasmBuilder::parseMemoryIndex(
+ Element& s, Index i, std::unique_ptr<Memory>& memory) {
if (i < s.size() && s[i]->isStr()) {
if (s[i]->str() == "i64") {
i++;
- wasm.memory.indexType = Type::i64;
+ memory->indexType = Type::i64;
} else if (s[i]->str() == "i32") {
i++;
- wasm.memory.indexType = Type::i32;
+ memory->indexType = Type::i32;
}
}
return i;
}
-Index SExpressionWasmBuilder::parseMemoryLimits(Element& s, Index i) {
- i = parseMemoryIndex(s, i);
+Index SExpressionWasmBuilder::parseMemoryLimits(
+ Element& s, Index i, std::unique_ptr<Memory>& memory) {
+ i = parseMemoryIndex(s, i, memory);
if (i == s.size()) {
throw ParseException("missing memory limits", s.line, s.col);
}
auto initElem = s[i++];
- wasm.memory.initial = getAddress(initElem);
- if (!wasm.memory.is64()) {
- checkAddress(wasm.memory.initial, "excessive memory init", initElem);
+ memory->initial = getAddress(initElem);
+ if (!memory->is64()) {
+ checkAddress(memory->initial, "excessive memory init", initElem);
}
if (i == s.size()) {
- wasm.memory.max = Memory::kUnlimitedSize;
+ memory->max = Memory::kUnlimitedSize;
} else {
auto maxElem = s[i++];
- wasm.memory.max = getAddress(maxElem);
- if (!wasm.memory.is64() && wasm.memory.max > Memory::kMaxSize32) {
+ memory->max = getAddress(maxElem);
+ if (!memory->is64() && memory->max > Memory::kMaxSize32) {
throw ParseException(
"total memory must be <= 4GB", maxElem->line, maxElem->col);
}
@@ -3032,23 +3201,24 @@ Index SExpressionWasmBuilder::parseMemoryLimits(Element& s, Index i) {
}
void SExpressionWasmBuilder::parseMemory(Element& s, bool preParseImport) {
- if (wasm.memory.exists) {
- throw ParseException("too many memories", s.line, s.col);
- }
- wasm.memory.exists = true;
- wasm.memory.shared = false;
+ auto memory = make_unique<Memory>();
+ memory->shared = false;
Index i = 1;
if (s[i]->dollared()) {
- wasm.memory.setExplicitName(s[i++]->str());
+ memory->setExplicitName(s[i++]->str());
+ } else {
+ memory->name = Name::fromInt(memoryCounter++);
}
- i = parseMemoryIndex(s, i);
+ memoryNames.push_back(memory->name);
+
+ i = parseMemoryIndex(s, i, memory);
Name importModule, importBase;
if (s[i]->isList()) {
auto& inner = *s[i];
if (elementStartsWith(inner, EXPORT)) {
auto ex = make_unique<Export>();
ex->name = inner[1]->str();
- ex->value = wasm.memory.name;
+ ex->value = memory->name;
ex->kind = ExternalKind::Memory;
if (wasm.getExportOrNull(ex->name)) {
throw ParseException("duplicate export", inner.line, inner.col);
@@ -3056,33 +3226,36 @@ void SExpressionWasmBuilder::parseMemory(Element& s, bool preParseImport) {
wasm.addExport(ex.release());
i++;
} else if (elementStartsWith(inner, IMPORT)) {
- wasm.memory.module = inner[1]->str();
- wasm.memory.base = inner[2]->str();
+ memory->module = inner[1]->str();
+ memory->base = inner[2]->str();
i++;
} else if (elementStartsWith(inner, SHARED)) {
- wasm.memory.shared = true;
- parseMemoryLimits(inner, 1);
+ memory->shared = true;
+ parseMemoryLimits(inner, 1, memory);
i++;
} else {
if (!(inner.size() > 0 ? inner[0]->str() != IMPORT : true)) {
throw ParseException("bad import ending", inner.line, inner.col);
}
// (memory (data ..)) format
- auto j = parseMemoryIndex(inner, 1);
+ auto j = parseMemoryIndex(inner, 1, memory);
auto offset = allocator.alloc<Const>();
- if (wasm.memory.is64()) {
+ if (memory->is64()) {
offset->set(Literal(int64_t(0)));
} else {
offset->set(Literal(int32_t(0)));
}
- parseInnerData(
- inner, j, Name::fromInt(dataCounter++), false, offset, false);
- wasm.memory.initial = wasm.dataSegments[0]->data.size();
+ auto seg = Builder::makeDataSegment(
+ Name::fromInt(dataCounter++), memory->name, false, offset);
+ parseInnerData(inner, j, seg);
+ memory->initial = seg->data.size();
+ wasm.addDataSegment(std::move(seg));
+ wasm.addMemory(std::move(memory));
return;
}
}
- if (!wasm.memory.shared) {
- i = parseMemoryLimits(s, i);
+ if (!memory->shared) {
+ i = parseMemoryLimits(s, i, memory);
}
// Parse memory initializers.
@@ -3095,13 +3268,13 @@ void SExpressionWasmBuilder::parseMemory(Element& s, bool preParseImport) {
} else {
auto offsetElem = curr[j++];
offsetValue = getAddress(offsetElem);
- if (!wasm.memory.is64()) {
+ if (!memory->is64()) {
checkAddress(offsetValue, "excessive memory offset", offsetElem);
}
}
const char* input = curr[j]->c_str();
auto* offset = allocator.alloc<Const>();
- if (wasm.memory.is64()) {
+ if (memory->is64()) {
offset->type = Type::i64;
offset->value = Literal(offsetValue);
} else {
@@ -3111,27 +3284,33 @@ void SExpressionWasmBuilder::parseMemory(Element& s, bool preParseImport) {
if (auto size = strlen(input)) {
std::vector<char> data;
stringToBinary(input, size, data);
- auto segment = Builder::makeDataSegment(
- Name::fromInt(dataCounter++), false, offset, data.data(), data.size());
+ auto segment = Builder::makeDataSegment(Name::fromInt(dataCounter++),
+ memory->name,
+ false,
+ offset,
+ data.data(),
+ data.size());
segment->hasExplicitName = false;
- wasm.dataSegments.push_back(std::move(segment));
+ wasm.addDataSegment(std::move(segment));
} else {
- auto segment =
- Builder::makeDataSegment(Name::fromInt(dataCounter++), false, offset);
+ auto segment = Builder::makeDataSegment(
+ Name::fromInt(dataCounter++), memory->name, false, offset);
segment->hasExplicitName = false;
- wasm.dataSegments.push_back(std::move(segment));
+ wasm.addDataSegment(std::move(segment));
}
i++;
}
+ wasm.addMemory(std::move(memory));
}
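
As a rough reference for the parseMemory rewrite above, these are the text forms it now maps onto individual Memory objects registered through wasm.addMemory; the memory names are examples, and unnamed declarations fall back to Name::fromInt(memoryCounter++).

// Illustrative .wat inputs handled by the updated parseMemory:
const char* namedMemory = "(memory $m 1 10)";                    // explicit name and limits
const char* memory64    = "(memory i64 1)";                      // 64-bit index type
const char* importedMem = "(memory (import \"env\" \"mem\") 1)"; // import form
const char* exportedMem = "(memory (export \"mem\") 1)";         // export form
const char* inlineData  = "(memory (data \"hello\"))";           // initial size taken from the inline data
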
void SExpressionWasmBuilder::parseData(Element& s) {
- if (!wasm.memory.exists) {
+ if (wasm.memories.empty()) {
throw ParseException("data but no memory", s.line, s.col);
}
Index i = 1;
Name name = Name::fromInt(dataCounter++);
bool hasExplicitName = false;
+ Name memory;
bool isPassive = true;
Expression* offset = nullptr;
@@ -3143,11 +3322,11 @@ void SExpressionWasmBuilder::parseData(Element& s) {
if (s[i]->isList()) {
// Optional (memory <memoryidx>)
if (elementStartsWith(s[i], MEMORY)) {
- // TODO: we're just skipping memory since we have only one. Assign the
- // memory name to the segment when we support multiple memories.
- i += 1;
+ auto& inner = *s[i++];
+ memory = getMemoryName(*inner[1]);
+ } else {
+ memory = getMemoryNameAtIdx(0);
}
-
// Offset expression (offset (<expr>)) | (<expr>)
auto& inner = *s[i++];
if (elementStartsWith(inner, OFFSET)) {
@@ -3158,15 +3337,15 @@ void SExpressionWasmBuilder::parseData(Element& s) {
isPassive = false;
}
- parseInnerData(s, i, name, hasExplicitName, offset, isPassive);
+ auto seg = Builder::makeDataSegment(name, memory, isPassive, offset);
+ seg->hasExplicitName = hasExplicitName;
+ parseInnerData(s, i, seg);
+ wasm.addDataSegment(std::move(seg));
}
void SExpressionWasmBuilder::parseInnerData(Element& s,
Index i,
- Name name,
- bool hasExplicitName,
- Expression* offset,
- bool isPassive) {
+ std::unique_ptr<DataSegment>& seg) {
std::vector<char> data;
while (i < s.size()) {
const char* input = s[i++]->c_str();
@@ -3174,10 +3353,8 @@ void SExpressionWasmBuilder::parseInnerData(Element& s,
stringToBinary(input, size, data);
}
}
- auto curr =
- Builder::makeDataSegment(name, isPassive, offset, data.data(), data.size());
- curr->hasExplicitName = hasExplicitName;
- wasm.dataSegments.push_back(std::move(curr));
+ seg->data.resize(data.size());
+ std::copy_n(data.data(), data.size(), seg->data.begin());
}
void SExpressionWasmBuilder::parseExport(Element& s) {
@@ -3224,10 +3401,6 @@ void SExpressionWasmBuilder::parseImport(Element& s) {
kind = ExternalKind::Function;
} else if (elementStartsWith(*s[3], MEMORY)) {
kind = ExternalKind::Memory;
- if (wasm.memory.exists) {
- throw ParseException("more than one memory", s[3]->line, s[3]->col);
- }
- wasm.memory.exists = true;
} else if (elementStartsWith(*s[3], TABLE)) {
kind = ExternalKind::Table;
} else if (elementStartsWith(*s[3], GLOBAL)) {
@@ -3320,20 +3493,25 @@ void SExpressionWasmBuilder::parseImport(Element& s) {
j++; // funcref
// ends with the table element type
} else if (kind == ExternalKind::Memory) {
- wasm.memory.setName(name, hasExplicitName);
- wasm.memory.module = module;
- wasm.memory.base = base;
+ auto memory = make_unique<Memory>();
+ memory->setName(name, hasExplicitName);
+ memory->module = module;
+ memory->base = base;
+ memoryNames.push_back(name);
+
if (inner[j]->isList()) {
auto& limits = *inner[j];
if (!elementStartsWith(limits, SHARED)) {
throw ParseException(
"bad memory limit declaration", inner[j]->line, inner[j]->col);
}
- wasm.memory.shared = true;
- j = parseMemoryLimits(limits, 1);
+ memory->shared = true;
+ j = parseMemoryLimits(limits, 1, memory);
} else {
- j = parseMemoryLimits(inner, j);
+ j = parseMemoryLimits(inner, j, memory);
}
+
+ wasm.addMemory(std::move(memory));
} else if (kind == ExternalKind::Tag) {
auto tag = make_unique<Tag>();
HeapType tagType;
diff --git a/src/wasm/wasm-stack.cpp b/src/wasm/wasm-stack.cpp
index b887f6ec5..13bd09927 100644
--- a/src/wasm/wasm-stack.cpp
+++ b/src/wasm/wasm-stack.cpp
@@ -234,7 +234,7 @@ void BinaryInstWriter::visitLoad(Load* curr) {
WASM_UNREACHABLE("unexpected type");
}
}
- emitMemoryAccess(curr->align, curr->bytes, curr->offset);
+ emitMemoryAccess(curr->align, curr->bytes, curr->offset, curr->memory);
}
void BinaryInstWriter::visitStore(Store* curr) {
@@ -331,7 +331,7 @@ void BinaryInstWriter::visitStore(Store* curr) {
WASM_UNREACHABLE("unexpected type");
}
}
- emitMemoryAccess(curr->align, curr->bytes, curr->offset);
+ emitMemoryAccess(curr->align, curr->bytes, curr->offset, curr->memory);
}
void BinaryInstWriter::visitAtomicRMW(AtomicRMW* curr) {
@@ -390,7 +390,7 @@ void BinaryInstWriter::visitAtomicRMW(AtomicRMW* curr) {
}
#undef CASE_FOR_OP
- emitMemoryAccess(curr->bytes, curr->bytes, curr->offset);
+ emitMemoryAccess(curr->bytes, curr->bytes, curr->offset, curr->memory);
}
void BinaryInstWriter::visitAtomicCmpxchg(AtomicCmpxchg* curr) {
@@ -432,7 +432,7 @@ void BinaryInstWriter::visitAtomicCmpxchg(AtomicCmpxchg* curr) {
default:
WASM_UNREACHABLE("unexpected type");
}
- emitMemoryAccess(curr->bytes, curr->bytes, curr->offset);
+ emitMemoryAccess(curr->bytes, curr->bytes, curr->offset, curr->memory);
}
void BinaryInstWriter::visitAtomicWait(AtomicWait* curr) {
@@ -440,12 +440,12 @@ void BinaryInstWriter::visitAtomicWait(AtomicWait* curr) {
switch (curr->expectedType.getBasic()) {
case Type::i32: {
o << int8_t(BinaryConsts::I32AtomicWait);
- emitMemoryAccess(4, 4, curr->offset);
+ emitMemoryAccess(4, 4, curr->offset, curr->memory);
break;
}
case Type::i64: {
o << int8_t(BinaryConsts::I64AtomicWait);
- emitMemoryAccess(8, 8, curr->offset);
+ emitMemoryAccess(8, 8, curr->offset, curr->memory);
break;
}
default:
@@ -455,7 +455,7 @@ void BinaryInstWriter::visitAtomicWait(AtomicWait* curr) {
void BinaryInstWriter::visitAtomicNotify(AtomicNotify* curr) {
o << int8_t(BinaryConsts::AtomicPrefix) << int8_t(BinaryConsts::AtomicNotify);
- emitMemoryAccess(4, 4, curr->offset);
+ emitMemoryAccess(4, 4, curr->offset, curr->memory);
}
void BinaryInstWriter::visitAtomicFence(AtomicFence* curr) {
@@ -646,7 +646,8 @@ void BinaryInstWriter::visitSIMDLoad(SIMDLoad* curr) {
break;
}
assert(curr->align);
- emitMemoryAccess(curr->align, /*(unused) bytes=*/0, curr->offset);
+ emitMemoryAccess(
+ curr->align, /*(unused) bytes=*/0, curr->offset, curr->memory);
}
void BinaryInstWriter::visitSIMDLoadStoreLane(SIMDLoadStoreLane* curr) {
@@ -678,14 +679,15 @@ void BinaryInstWriter::visitSIMDLoadStoreLane(SIMDLoadStoreLane* curr) {
break;
}
assert(curr->align);
- emitMemoryAccess(curr->align, /*(unused) bytes=*/0, curr->offset);
+ emitMemoryAccess(
+ curr->align, /*(unused) bytes=*/0, curr->offset, curr->memory);
o << curr->index;
}
void BinaryInstWriter::visitMemoryInit(MemoryInit* curr) {
o << int8_t(BinaryConsts::MiscPrefix);
o << U32LEB(BinaryConsts::MemoryInit);
- o << U32LEB(curr->segment) << int8_t(0);
+ o << U32LEB(curr->segment) << int8_t(parent.getMemoryIndex(curr->memory));
}
void BinaryInstWriter::visitDataDrop(DataDrop* curr) {
@@ -697,13 +699,14 @@ void BinaryInstWriter::visitDataDrop(DataDrop* curr) {
void BinaryInstWriter::visitMemoryCopy(MemoryCopy* curr) {
o << int8_t(BinaryConsts::MiscPrefix);
o << U32LEB(BinaryConsts::MemoryCopy);
- o << int8_t(0) << int8_t(0);
+ o << int8_t(parent.getMemoryIndex(curr->destMemory))
+ << int8_t(parent.getMemoryIndex(curr->sourceMemory));
}
void BinaryInstWriter::visitMemoryFill(MemoryFill* curr) {
o << int8_t(BinaryConsts::MiscPrefix);
o << U32LEB(BinaryConsts::MemoryFill);
- o << int8_t(0);
+ o << int8_t(parent.getMemoryIndex(curr->memory));
}
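
A hedged sketch of the immediate bytes the bulk-memory writers above now emit (the opcode value below comes from the bulk memory proposal and is an assumption here, not taken from this diff): memory.init carries a segment index followed by a destination memory byte, memory.copy a destination then a source memory byte, and memory.fill a single memory byte; all of these were previously reserved zero bytes.

#include <cstdint>
#include <vector>

// Sketch only, not Binaryen code: immediates for memory.copy with explicit
// destination and source memories.
void sketchMemoryCopyImmediates(std::vector<uint8_t>& out,
                                uint8_t destMemoryIdx,
                                uint8_t sourceMemoryIdx) {
  out.push_back(0xFC);            // misc prefix (BinaryConsts::MiscPrefix)
  out.push_back(0x0A);            // memory.copy opcode (assumed value)
  out.push_back(destMemoryIdx);   // previously a reserved 0x00
  out.push_back(sourceMemoryIdx); // previously a reserved 0x00
}
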
void BinaryInstWriter::visitConst(Const* curr) {
@@ -1859,12 +1862,12 @@ void BinaryInstWriter::visitReturn(Return* curr) {
void BinaryInstWriter::visitMemorySize(MemorySize* curr) {
o << int8_t(BinaryConsts::MemorySize);
- o << U32LEB(0); // Reserved flags field
+ o << U32LEB(parent.getMemoryIndex(curr->memory));
}
void BinaryInstWriter::visitMemoryGrow(MemoryGrow* curr) {
o << int8_t(BinaryConsts::MemoryGrow);
- o << U32LEB(0); // Reserved flags field
+ o << U32LEB(parent.getMemoryIndex(curr->memory));
}
void BinaryInstWriter::visitRefNull(RefNull* curr) {
@@ -2476,8 +2479,19 @@ void BinaryInstWriter::setScratchLocals() {
void BinaryInstWriter::emitMemoryAccess(size_t alignment,
size_t bytes,
- uint32_t offset) {
- o << U32LEB(Bits::log2(alignment ? alignment : bytes));
+ uint32_t offset,
+ Name memory) {
+ uint32_t alignmentBits = Bits::log2(alignment ? alignment : bytes);
+ uint32_t memoryIdx = parent.getMemoryIndex(memory);
+ if (memoryIdx > 0) {
+ // Set bit 6 in the alignment to indicate a memory index is present per:
+ // https://github.com/WebAssembly/multi-memory/blob/main/proposals/multi-memory/Overview.md
+ alignmentBits = alignmentBits | 1 << 6;
+ }
+ o << U32LEB(alignmentBits);
+ if (memoryIdx > 0) {
+ o << U32LEB(memoryIdx);
+ }
o << U32LEB(offset);
}
diff --git a/src/wasm/wasm-validator.cpp b/src/wasm/wasm-validator.cpp
index b7354ff3f..92f6c76bf 100644
--- a/src/wasm/wasm-validator.cpp
+++ b/src/wasm/wasm-validator.cpp
@@ -499,7 +499,10 @@ private:
validateCallParamsAndResult(curr, sigType, curr);
}
- Type indexType() { return getModule()->memory.indexType; }
+ Type indexType(Name memoryName) {
+ auto memory = getModule()->getMemory(memoryName);
+ return memory->indexType;
+ }
};
void FunctionValidator::noteLabelName(Name name) {
@@ -934,8 +937,8 @@ void FunctionValidator::visitGlobalSet(GlobalSet* curr) {
}
void FunctionValidator::visitLoad(Load* curr) {
- shouldBeTrue(
- getModule()->memory.exists, curr, "Memory operations require a memory");
+ auto* memory = getModule()->getMemoryOrNull(curr->memory);
+ shouldBeTrue(!!memory, curr, "memory.load memory must exist");
if (curr->isAtomic) {
shouldBeTrue(getModule()->features.hasAtomics(),
curr,
@@ -954,7 +957,7 @@ void FunctionValidator::visitLoad(Load* curr) {
validateAlignment(curr->align, curr->type, curr->bytes, curr->isAtomic, curr);
shouldBeEqualOrFirstIsUnreachable(
curr->ptr->type,
- indexType(),
+ indexType(curr->memory),
curr,
"load pointer type must match memory index type");
if (curr->isAtomic) {
@@ -965,8 +968,8 @@ void FunctionValidator::visitLoad(Load* curr) {
}
void FunctionValidator::visitStore(Store* curr) {
- shouldBeTrue(
- getModule()->memory.exists, curr, "Memory operations require a memory");
+ auto* memory = getModule()->getMemoryOrNull(curr->memory);
+ shouldBeTrue(!!memory, curr, "memory.store memory must exist");
if (curr->isAtomic) {
shouldBeTrue(getModule()->features.hasAtomics(),
curr,
@@ -986,7 +989,7 @@ void FunctionValidator::visitStore(Store* curr) {
curr->align, curr->valueType, curr->bytes, curr->isAtomic, curr);
shouldBeEqualOrFirstIsUnreachable(
curr->ptr->type,
- indexType(),
+ indexType(curr->memory),
curr,
"store pointer must match memory index type");
shouldBeUnequal(curr->value->type,
@@ -1002,15 +1005,15 @@ void FunctionValidator::visitStore(Store* curr) {
}
void FunctionValidator::visitAtomicRMW(AtomicRMW* curr) {
- shouldBeTrue(
- getModule()->memory.exists, curr, "Memory operations require a memory");
+ auto* memory = getModule()->getMemoryOrNull(curr->memory);
+ shouldBeTrue(!!memory, curr, "memory.atomicRMW memory must exist");
shouldBeTrue(getModule()->features.hasAtomics(),
curr,
"Atomic operation (atomics are disabled)");
validateMemBytes(curr->bytes, curr->type, curr);
shouldBeEqualOrFirstIsUnreachable(
curr->ptr->type,
- indexType(),
+ indexType(curr->memory),
curr,
"AtomicRMW pointer type must match memory index type");
shouldBeEqualOrFirstIsUnreachable(curr->type,
@@ -1022,15 +1025,15 @@ void FunctionValidator::visitAtomicRMW(AtomicRMW* curr) {
}
void FunctionValidator::visitAtomicCmpxchg(AtomicCmpxchg* curr) {
- shouldBeTrue(
- getModule()->memory.exists, curr, "Memory operations require a memory");
+ auto* memory = getModule()->getMemoryOrNull(curr->memory);
+ shouldBeTrue(!!memory, curr, "memory.atomicCmpxchg memory must exist");
shouldBeTrue(getModule()->features.hasAtomics(),
curr,
"Atomic operation (atomics are disabled)");
validateMemBytes(curr->bytes, curr->type, curr);
shouldBeEqualOrFirstIsUnreachable(
curr->ptr->type,
- indexType(),
+ indexType(curr->memory),
curr,
"cmpxchg pointer must match memory index type");
if (curr->expected->type != Type::unreachable &&
@@ -1055,8 +1058,8 @@ void FunctionValidator::visitAtomicCmpxchg(AtomicCmpxchg* curr) {
}
void FunctionValidator::visitAtomicWait(AtomicWait* curr) {
- shouldBeTrue(
- getModule()->memory.exists, curr, "Memory operations require a memory");
+ auto* memory = getModule()->getMemoryOrNull(curr->memory);
+ shouldBeTrue(!!memory, curr, "memory.atomicWait memory must exist");
shouldBeTrue(getModule()->features.hasAtomics(),
curr,
"Atomic operation (atomics are disabled)");
@@ -1064,7 +1067,7 @@ void FunctionValidator::visitAtomicWait(AtomicWait* curr) {
curr->type, Type(Type::i32), curr, "AtomicWait must have type i32");
shouldBeEqualOrFirstIsUnreachable(
curr->ptr->type,
- indexType(),
+ indexType(curr->memory),
curr,
"AtomicWait pointer must match memory index type");
shouldBeIntOrUnreachable(
@@ -1081,8 +1084,8 @@ void FunctionValidator::visitAtomicWait(AtomicWait* curr) {
}
void FunctionValidator::visitAtomicNotify(AtomicNotify* curr) {
- shouldBeTrue(
- getModule()->memory.exists, curr, "Memory operations require a memory");
+ auto* memory = getModule()->getMemoryOrNull(curr->memory);
+ shouldBeTrue(!!memory, curr, "memory.atomicNotify memory must exist");
shouldBeTrue(getModule()->features.hasAtomics(),
curr,
"Atomic operation (atomics are disabled)");
@@ -1090,7 +1093,7 @@ void FunctionValidator::visitAtomicNotify(AtomicNotify* curr) {
curr->type, Type(Type::i32), curr, "AtomicNotify must have type i32");
shouldBeEqualOrFirstIsUnreachable(
curr->ptr->type,
- indexType(),
+ indexType(curr->memory),
curr,
"AtomicNotify pointer must match memory index type");
shouldBeEqualOrFirstIsUnreachable(
@@ -1101,8 +1104,8 @@ void FunctionValidator::visitAtomicNotify(AtomicNotify* curr) {
}
void FunctionValidator::visitAtomicFence(AtomicFence* curr) {
- shouldBeTrue(
- getModule()->memory.exists, curr, "Memory operations require a memory");
+ shouldBeFalse(
+ getModule()->memories.empty(), curr, "Memory operations require a memory");
shouldBeTrue(getModule()->features.hasAtomics(),
curr,
"Atomic operation (atomics are disabled)");
@@ -1240,15 +1243,15 @@ void FunctionValidator::visitSIMDShift(SIMDShift* curr) {
}
void FunctionValidator::visitSIMDLoad(SIMDLoad* curr) {
- shouldBeTrue(
- getModule()->memory.exists, curr, "Memory operations require a memory");
+ auto* memory = getModule()->getMemoryOrNull(curr->memory);
+ shouldBeTrue(!!memory, curr, "memory.SIMDLoad memory must exist");
shouldBeTrue(
getModule()->features.hasSIMD(), curr, "SIMD operation (SIMD is disabled)");
shouldBeEqualOrFirstIsUnreachable(
curr->type, Type(Type::v128), curr, "load_splat must have type v128");
shouldBeEqualOrFirstIsUnreachable(
curr->ptr->type,
- indexType(),
+ indexType(curr->memory),
curr,
"load_splat address must match memory index type");
Type memAlignType = Type::none;
@@ -1275,8 +1278,8 @@ void FunctionValidator::visitSIMDLoad(SIMDLoad* curr) {
}
void FunctionValidator::visitSIMDLoadStoreLane(SIMDLoadStoreLane* curr) {
- shouldBeTrue(
- getModule()->memory.exists, curr, "Memory operations require a memory");
+ auto* memory = getModule()->getMemoryOrNull(curr->memory);
+ shouldBeTrue(!!memory, curr, "memory.SIMDLoadStoreLane memory must exist");
shouldBeTrue(
getModule()->features.hasSIMD(), curr, "SIMD operation (SIMD is disabled)");
if (curr->isLoad()) {
@@ -1288,7 +1291,7 @@ void FunctionValidator::visitSIMDLoadStoreLane(SIMDLoadStoreLane* curr) {
}
shouldBeEqualOrFirstIsUnreachable(
curr->ptr->type,
- indexType(),
+ indexType(curr->memory),
curr,
"loadX_lane or storeX_lane address must match memory index type");
shouldBeEqualOrFirstIsUnreachable(
@@ -1335,7 +1338,7 @@ void FunctionValidator::visitMemoryInit(MemoryInit* curr) {
curr->type, Type(Type::none), curr, "memory.init must have type none");
shouldBeEqualOrFirstIsUnreachable(
curr->dest->type,
- indexType(),
+ indexType(curr->memory),
curr,
"memory.init dest must match memory index type");
shouldBeEqualOrFirstIsUnreachable(curr->offset->type,
@@ -1344,9 +1347,8 @@ void FunctionValidator::visitMemoryInit(MemoryInit* curr) {
"memory.init offset must be an i32");
shouldBeEqualOrFirstIsUnreachable(
curr->size->type, Type(Type::i32), curr, "memory.init size must be an i32");
- if (!shouldBeTrue(getModule()->memory.exists,
- curr,
- "Memory operations require a memory")) {
+ auto* memory = getModule()->getMemoryOrNull(curr->memory);
+ if (!shouldBeTrue(!!memory, curr, "memory.init memory must exist")) {
return;
}
shouldBeTrue(curr->segment < getModule()->dataSegments.size(),
@@ -1360,9 +1362,9 @@ void FunctionValidator::visitDataDrop(DataDrop* curr) {
"Bulk memory operation (bulk memory is disabled)");
shouldBeEqualOrFirstIsUnreachable(
curr->type, Type(Type::none), curr, "data.drop must have type none");
- if (!shouldBeTrue(getModule()->memory.exists,
- curr,
- "Memory operations require a memory")) {
+ if (!shouldBeFalse(getModule()->memories.empty(),
+ curr,
+ "Memory operations require a memory")) {
return;
}
shouldBeTrue(curr->segment < getModule()->dataSegments.size(),
@@ -1376,23 +1378,30 @@ void FunctionValidator::visitMemoryCopy(MemoryCopy* curr) {
"Bulk memory operation (bulk memory is disabled)");
shouldBeEqualOrFirstIsUnreachable(
curr->type, Type(Type::none), curr, "memory.copy must have type none");
+ auto* destMemory = getModule()->getMemoryOrNull(curr->destMemory);
+ shouldBeTrue(!!destMemory, curr, "memory.copy destMemory must exist");
+ auto* sourceMemory = getModule()->getMemoryOrNull(curr->sourceMemory);
+ shouldBeTrue(!!sourceMemory, curr, "memory.copy sourceMemory must exist");
shouldBeEqualOrFirstIsUnreachable(
curr->dest->type,
- indexType(),
+ indexType(curr->destMemory),
curr,
- "memory.copy dest must match memory index type");
+ "memory.copy dest must match destMemory index type");
shouldBeEqualOrFirstIsUnreachable(
curr->source->type,
- indexType(),
+ indexType(curr->sourceMemory),
curr,
- "memory.copy source must match memory index type");
+ "memory.copy source must match sourceMemory index type");
shouldBeEqualOrFirstIsUnreachable(
curr->size->type,
- indexType(),
+ indexType(curr->destMemory),
curr,
- "memory.copy size must match memory index type");
- shouldBeTrue(
- getModule()->memory.exists, curr, "Memory operations require a memory");
+ "memory.copy size must match destMemory index type");
+ shouldBeEqualOrFirstIsUnreachable(
+ curr->size->type,
+ indexType(curr->sourceMemory),
+ curr,
+ "memory.copy size must match destMemory index type");
}
void FunctionValidator::visitMemoryFill(MemoryFill* curr) {
@@ -1403,7 +1412,7 @@ void FunctionValidator::visitMemoryFill(MemoryFill* curr) {
curr->type, Type(Type::none), curr, "memory.fill must have type none");
shouldBeEqualOrFirstIsUnreachable(
curr->dest->type,
- indexType(),
+ indexType(curr->memory),
curr,
"memory.fill dest must match memory index type");
shouldBeEqualOrFirstIsUnreachable(curr->value->type,
@@ -1412,11 +1421,11 @@ void FunctionValidator::visitMemoryFill(MemoryFill* curr) {
"memory.fill value must be an i32");
shouldBeEqualOrFirstIsUnreachable(
curr->size->type,
- indexType(),
+ indexType(curr->memory),
curr,
"memory.fill size must match memory index type");
- shouldBeTrue(
- getModule()->memory.exists, curr, "Memory operations require a memory");
+ auto* memory = getModule()->getMemoryOrNull(curr->memory);
+ shouldBeTrue(!!memory, curr, "memory.fill memory must exist");
}
void FunctionValidator::validateMemBytes(uint8_t bytes,
@@ -2020,15 +2029,15 @@ void FunctionValidator::visitReturn(Return* curr) {
}
void FunctionValidator::visitMemorySize(MemorySize* curr) {
- shouldBeTrue(
- getModule()->memory.exists, curr, "Memory operations require a memory");
+ auto* memory = getModule()->getMemoryOrNull(curr->memory);
+ shouldBeTrue(!!memory, curr, "memory.size memory must exist");
}
void FunctionValidator::visitMemoryGrow(MemoryGrow* curr) {
- shouldBeTrue(
- getModule()->memory.exists, curr, "Memory operations require a memory");
+ auto* memory = getModule()->getMemoryOrNull(curr->memory);
+ shouldBeTrue(!!memory, curr, "memory.grow memory must exist");
shouldBeEqualOrFirstIsUnreachable(curr->delta->type,
- indexType(),
+ indexType(curr->memory),
curr,
"memory.grow must match memory index type");
}
@@ -2940,7 +2949,7 @@ static void validateExports(Module& module, ValidationInfo& info) {
name,
"module table exports must be found");
} else if (exp->kind == ExternalKind::Memory) {
- info.shouldBeTrue(name == Name("0") || name == module.memory.name,
+ info.shouldBeTrue(module.getMemoryOrNull(name),
name,
"module memory exports must be found");
} else if (exp->kind == ExternalKind::Tag) {
@@ -2982,25 +2991,28 @@ static void validateGlobals(Module& module, ValidationInfo& info) {
}
static void validateMemory(Module& module, ValidationInfo& info) {
- auto& curr = module.memory;
+ if (module.memories.empty()) {
+ return;
+ }
+ auto& curr = module.memories[0];
info.shouldBeFalse(
- curr.initial > curr.max, "memory", "memory max >= initial");
- if (curr.is64()) {
+ curr->initial > curr->max, "memory", "memory max >= initial");
+ if (curr->is64()) {
info.shouldBeTrue(module.features.hasMemory64(),
"memory",
"memory is 64-bit, but memory64 is disabled");
} else {
- info.shouldBeTrue(curr.initial <= Memory::kMaxSize32,
+ info.shouldBeTrue(curr->initial <= Memory::kMaxSize32,
"memory",
"initial memory must be <= 4GB");
- info.shouldBeTrue(!curr.hasMax() || curr.max <= Memory::kMaxSize32,
+ info.shouldBeTrue(!curr->hasMax() || curr->max <= Memory::kMaxSize32,
"memory",
"max memory must be <= 4GB, or unlimited");
}
- info.shouldBeTrue(!curr.shared || curr.hasMax(),
+ info.shouldBeTrue(!curr->shared || curr->hasMax(),
"memory",
"shared memory must have max size");
- if (curr.shared) {
+ if (curr->shared) {
info.shouldBeTrue(module.features.hasAtomics(),
"memory",
"memory is shared, but atomics are disabled");
@@ -3016,7 +3028,7 @@ static void validateMemory(Module& module, ValidationInfo& info) {
segment->offset,
"passive segment should not have an offset");
} else {
- if (curr.is64()) {
+ if (curr->is64()) {
if (!info.shouldBeEqual(segment->offset->type,
Type(Type::i64),
segment->offset,
@@ -3033,14 +3045,14 @@ static void validateMemory(Module& module, ValidationInfo& info) {
}
info.shouldBeTrue(checkSegmentOffset(segment->offset,
segment->data.size(),
- curr.initial * Memory::kPageSize,
+ curr->initial * Memory::kPageSize,
module.features),
segment->offset,
"memory segment offset should be reasonable");
if (segment->offset->is<Const>()) {
auto start = segment->offset->cast<Const>()->value.getUnsigned();
auto end = start + size;
- info.shouldBeTrue(end <= curr.initial * Memory::kPageSize,
+ info.shouldBeTrue(end <= curr->initial * Memory::kPageSize,
segment->data.size(),
"segment size should fit in memory (end)");
}
@@ -3049,8 +3061,8 @@ static void validateMemory(Module& module, ValidationInfo& info) {
// If the memory is imported we don't actually know its initial size.
// Specifically wasm dll's import a zero sized memory which is perfectly
// valid.
- if (!curr.imported()) {
- info.shouldBeTrue(size <= curr.initial * Memory::kPageSize,
+ if (!curr->imported()) {
+ info.shouldBeTrue(size <= curr->initial * Memory::kPageSize,
segment->data.size(),
"segment size should fit in memory (initial)");
}
diff --git a/src/wasm/wasm.cpp b/src/wasm/wasm.cpp
index 1c9c1389d..574eb5c47 100644
--- a/src/wasm/wasm.cpp
+++ b/src/wasm/wasm.cpp
@@ -1336,6 +1336,10 @@ ElementSegment* Module::getElementSegment(Name name) {
return getModuleElement(elementSegmentsMap, name, "getElementSegment");
}
+Memory* Module::getMemory(Name name) {
+ return getModuleElement(memoriesMap, name, "getMemory");
+}
+
DataSegment* Module::getDataSegment(Name name) {
return getModuleElement(dataSegmentsMap, name, "getDataSegment");
}
@@ -1373,6 +1377,10 @@ ElementSegment* Module::getElementSegmentOrNull(Name name) {
return getModuleElementOrNull(elementSegmentsMap, name);
}
+Memory* Module::getMemoryOrNull(Name name) {
+ return getModuleElementOrNull(memoriesMap, name);
+}
+
DataSegment* Module::getDataSegmentOrNull(Name name) {
return getModuleElementOrNull(dataSegmentsMap, name);
}
@@ -1452,6 +1460,10 @@ Module::addElementSegment(std::unique_ptr<ElementSegment>&& curr) {
elementSegments, elementSegmentsMap, std::move(curr), "addElementSegment");
}
+Memory* Module::addMemory(std::unique_ptr<Memory>&& curr) {
+ return addModuleElement(memories, memoriesMap, std::move(curr), "addMemory");
+}
+
DataSegment* Module::addDataSegment(std::unique_ptr<DataSegment>&& curr) {
return addModuleElement(
dataSegments, dataSegmentsMap, std::move(curr), "addDataSegment");
@@ -1490,6 +1502,9 @@ void Module::removeTable(Name name) {
void Module::removeElementSegment(Name name) {
removeModuleElement(elementSegments, elementSegmentsMap, name);
}
+void Module::removeMemory(Name name) {
+ removeModuleElement(memories, memoriesMap, name);
+}
void Module::removeDataSegment(Name name) {
removeModuleElement(dataSegments, dataSegmentsMap, name);
}
@@ -1526,6 +1541,9 @@ void Module::removeTables(std::function<bool(Table*)> pred) {
void Module::removeElementSegments(std::function<bool(ElementSegment*)> pred) {
removeModuleElements(elementSegments, elementSegmentsMap, pred);
}
+void Module::removeMemories(std::function<bool(Memory*)> pred) {
+ removeModuleElements(memories, memoriesMap, pred);
+}
void Module::removeDataSegments(std::function<bool(DataSegment*)> pred) {
removeModuleElements(dataSegments, dataSegmentsMap, pred);
}
@@ -1536,6 +1554,13 @@ void Module::removeTags(std::function<bool(Tag*)> pred) {
removeModuleElements(tags, tagsMap, pred);
}
+void Module::updateDataSegmentsMap() {
+ dataSegmentsMap.clear();
+ for (auto& curr : dataSegments) {
+ dataSegmentsMap[curr->name] = curr.get();
+ }
+}
+
void Module::updateMaps() {
functionsMap.clear();
for (auto& curr : functions) {
@@ -1553,10 +1578,11 @@ void Module::updateMaps() {
for (auto& curr : elementSegments) {
elementSegmentsMap[curr->name] = curr.get();
}
- dataSegmentsMap.clear();
- for (auto& curr : dataSegments) {
- dataSegmentsMap[curr->name] = curr.get();
+ memoriesMap.clear();
+ for (auto& curr : memories) {
+ memoriesMap[curr->name] = curr.get();
}
+ updateDataSegmentsMap();
globalsMap.clear();
for (auto& curr : globals) {
globalsMap[curr->name] = curr.get();
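
A minimal usage sketch of the Module memory accessors added above (addMemory, getMemoryOrNull, removeMemory); the memory name and limits are illustrative:

#include <memory>

#include "wasm.h"

void sketchMemoryApi() {
  wasm::Module module;

  auto memory = std::make_unique<wasm::Memory>();
  memory->name = "mem0";
  memory->initial = 1;
  memory->max = wasm::Memory::kUnlimitedSize;
  module.addMemory(std::move(memory));

  // getMemory fatals on an unknown name; getMemoryOrNull returns nullptr.
  if (wasm::Memory* found = module.getMemoryOrNull("mem0")) {
    module.removeMemory(found->name);
  }
}
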
diff --git a/src/wasm2js.h b/src/wasm2js.h
index 442fd0d6e..cc4d363ec 100644
--- a/src/wasm2js.h
+++ b/src/wasm2js.h
@@ -103,7 +103,7 @@ bool hasActiveSegments(Module& wasm) {
}
bool needsBufferView(Module& wasm) {
- if (!wasm.memory.exists) {
+ if (wasm.memories.empty()) {
return false;
}
@@ -414,8 +414,8 @@ Ref Wasm2JSBuilder::processWasm(Module* wasm, Name funcName) {
ValueBuilder::appendArgumentToFunction(asmFunc, ENV);
// add memory import
- if (wasm->memory.exists) {
- if (wasm->memory.imported()) {
+ if (!wasm->memories.empty()) {
+ if (wasm->memories[0]->imported()) {
// find memory and buffer in imports
Ref theVar = ValueBuilder::makeVar();
asmFunc[3]->push_back(theVar);
@@ -423,7 +423,7 @@ Ref Wasm2JSBuilder::processWasm(Module* wasm, Name funcName) {
theVar,
"memory",
ValueBuilder::makeDot(ValueBuilder::makeName(ENV),
- ValueBuilder::makeName(wasm->memory.base)));
+ ValueBuilder::makeName(wasm->memories[0]->base)));
// Assign `buffer = memory.buffer`
Ref buf = ValueBuilder::makeVar();
@@ -436,7 +436,7 @@ Ref Wasm2JSBuilder::processWasm(Module* wasm, Name funcName) {
// If memory is growable, override the imported memory's grow method to
// ensure that when grow is called from the output it works as expected
- if (wasm->memory.max > wasm->memory.initial) {
+ if (wasm->memories[0]->max > wasm->memories[0]->initial) {
asmFunc[3]->push_back(
ValueBuilder::makeStatement(ValueBuilder::makeBinary(
ValueBuilder::makeDot(ValueBuilder::makeName("memory"),
@@ -452,8 +452,8 @@ Ref Wasm2JSBuilder::processWasm(Module* wasm, Name funcName) {
BUFFER,
ValueBuilder::makeNew(ValueBuilder::makeCall(
ValueBuilder::makeName("ArrayBuffer"),
- ValueBuilder::makeInt(Address::address32_t(wasm->memory.initial.addr *
- Memory::kPageSize)))));
+ ValueBuilder::makeInt(Address::address32_t(
+ wasm->memories[0]->initial.addr * Memory::kPageSize)))));
}
}
@@ -536,7 +536,7 @@ Ref Wasm2JSBuilder::processWasm(Module* wasm, Name funcName) {
}
void Wasm2JSBuilder::addBasics(Ref ast, Module* wasm) {
- if (wasm->memory.exists) {
+ if (!wasm->memories.empty()) {
// heaps, var HEAP8 = new global.Int8Array(buffer); etc
auto addHeap = [&](IString name, IString view) {
Ref theVar = ValueBuilder::makeVar();
@@ -732,7 +732,7 @@ void Wasm2JSBuilder::addExports(Ref ast, Module* wasm) {
Ref growDesc = ValueBuilder::makeObject();
ValueBuilder::appendToObjectWithQuotes(
descs, IString("grow"), growDesc);
- if (wasm->memory.max > wasm->memory.initial) {
+ if (wasm->memories[0]->max > wasm->memories[0]->initial) {
ValueBuilder::appendToObjectWithQuotes(
growDesc,
IString("value"),
@@ -805,7 +805,7 @@ void Wasm2JSBuilder::addExports(Ref ast, Module* wasm) {
Fatal() << "unsupported export type: " << export_->name << "\n";
}
}
- if (wasm->memory.exists) {
+ if (!wasm->memories.empty()) {
addMemoryFuncs(ast, wasm);
}
ast->push_back(
@@ -1474,7 +1474,8 @@ Ref Wasm2JSBuilder::processFunctionBody(Module* m,
}
Ref visitStore(Store* curr) {
- if (module->memory.initial < module->memory.max &&
+ if (!module->memories.empty() &&
+ module->memories[0]->initial < module->memories[0]->max &&
curr->type != Type::unreachable) {
// In JS, if memory grows then it is dangerous to write
// HEAP[f()] = ..
@@ -2006,8 +2007,8 @@ Ref Wasm2JSBuilder::processFunctionBody(Module* m,
}
Ref visitMemoryGrow(MemoryGrow* curr) {
- if (module->memory.exists &&
- module->memory.max > module->memory.initial) {
+ if (!module->memories.empty() &&
+ module->memories[0]->max > module->memories[0]->initial) {
return ValueBuilder::makeCall(
WASM_MEMORY_GROW,
makeJsCoercion(visit(curr->delta, EXPRESSION_RESULT),
@@ -2382,7 +2383,8 @@ void Wasm2JSBuilder::addMemoryFuncs(Ref ast, Module* wasm) {
JsType::JS_INT)));
ast->push_back(memorySizeFunc);
- if (wasm->memory.max > wasm->memory.initial) {
+ if (!wasm->memories.empty() &&
+ wasm->memories[0]->max > wasm->memories[0]->initial) {
addMemoryGrowFunc(ast, wasm);
}
}
@@ -2482,7 +2484,7 @@ void Wasm2JSBuilder::addMemoryGrowFunc(Ref ast, Module* wasm) {
ValueBuilder::makeName(IString("newBuffer"))));
// apply the changes to the memory import
- if (wasm->memory.imported()) {
+ if (!wasm->memories.empty() && wasm->memories[0]->imported()) {
ValueBuilder::appendToBlock(
block,
ValueBuilder::makeBinary(
@@ -2625,9 +2627,9 @@ void Wasm2JSGlue::emitPostES6() {
//
// Note that the translation here expects that the lower values of this memory
// can be used for conversions, so make sure there's at least one page.
- if (wasm.memory.exists && wasm.memory.imported()) {
+ if (!wasm.memories.empty() && wasm.memories[0]->imported()) {
out << "var mem" << moduleName.str << " = new ArrayBuffer("
- << wasm.memory.initial.addr * Memory::kPageSize << ");\n";
+ << wasm.memories[0]->initial.addr * Memory::kPageSize << ");\n";
}
// Actually invoke the `asmFunc` generated function, passing in all global
@@ -2709,7 +2711,7 @@ void Wasm2JSGlue::emitMemory() {
// If there are no memory segments, we don't need to emit any support code for
// segment creation.
- if ((!wasm.memory.exists) || wasm.dataSegments.empty()) {
+ if (wasm.dataSegments.empty()) {
return;
}