summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rwxr-xr-xscripts/gen-s-parser.py4
-rw-r--r--src/gen-s-parser.inc39
-rw-r--r--src/ir/effects.h15
-rw-r--r--src/parser/contexts.h25
-rw-r--r--src/parser/parsers.h46
-rw-r--r--src/passes/OptimizeInstructions.cpp13
-rw-r--r--src/passes/Precompute.cpp46
-rw-r--r--src/passes/Print.cpp31
-rw-r--r--src/wasm-binary.h12
-rw-r--r--src/wasm-builder.h14
-rw-r--r--src/wasm-delegations-fields.def2
-rw-r--r--src/wasm-ir-builder.h9
-rw-r--r--src/wasm.h8
-rw-r--r--src/wasm/wasm-binary.cpp40
-rw-r--r--src/wasm/wasm-ir-builder.cpp13
-rw-r--r--src/wasm/wasm-stack.cpp21
-rw-r--r--src/wasm/wasm-validator.cpp10
-rw-r--r--test/lit/basic/gc-atomics.wast149
-rw-r--r--test/lit/passes/optimize-instructions-gc-atomics.wast157
-rw-r--r--test/lit/passes/precompute-gc-atomics.wast72
-rw-r--r--test/lit/passes/vacuum-gc-atomics.wast91
-rw-r--r--test/lit/validation/gc-atomics.wast38
22 files changed, 813 insertions, 42 deletions
diff --git a/scripts/gen-s-parser.py b/scripts/gen-s-parser.py
index d15c07e8e..b5592d433 100755
--- a/scripts/gen-s-parser.py
+++ b/scripts/gen-s-parser.py
@@ -617,7 +617,11 @@ instructions = [
("struct.get", "makeStructGet()"),
("struct.get_s", "makeStructGet(true)"),
("struct.get_u", "makeStructGet(false)"),
+ ("struct.atomic.get", "makeAtomicStructGet()"),
+ ("struct.atomic.get_s", "makeAtomicStructGet(true)"),
+ ("struct.atomic.get_u", "makeAtomicStructGet(false)"),
("struct.set", "makeStructSet()"),
+ ("struct.atomic.set", "makeAtomicStructSet()"),
("array.new", "makeArrayNew(false)"),
("array.new_default", "makeArrayNew(true)"),
("array.new_data", "makeArrayNewData()"),
diff --git a/src/gen-s-parser.inc b/src/gen-s-parser.inc
index 75fda4f7a..a96ee2659 100644
--- a/src/gen-s-parser.inc
+++ b/src/gen-s-parser.inc
@@ -5013,6 +5013,45 @@ switch (buf[0]) {
}
case 'u': {
switch (buf[7]) {
+ case 'a': {
+ switch (buf[14]) {
+ case 'g': {
+ switch (buf[17]) {
+ case '\0':
+ if (op == "struct.atomic.get"sv) {
+ CHECK_ERR(makeAtomicStructGet(ctx, pos, annotations));
+ return Ok{};
+ }
+ goto parse_error;
+ case '_': {
+ switch (buf[18]) {
+ case 's':
+ if (op == "struct.atomic.get_s"sv) {
+ CHECK_ERR(makeAtomicStructGet(ctx, pos, annotations, true));
+ return Ok{};
+ }
+ goto parse_error;
+ case 'u':
+ if (op == "struct.atomic.get_u"sv) {
+ CHECK_ERR(makeAtomicStructGet(ctx, pos, annotations, false));
+ return Ok{};
+ }
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ default: goto parse_error;
+ }
+ }
+ case 's':
+ if (op == "struct.atomic.set"sv) {
+ CHECK_ERR(makeAtomicStructSet(ctx, pos, annotations));
+ return Ok{};
+ }
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
case 'g': {
switch (buf[10]) {
case '\0':
diff --git a/src/ir/effects.h b/src/ir/effects.h
index 716624d64..ee596f67b 100644
--- a/src/ir/effects.h
+++ b/src/ir/effects.h
@@ -872,6 +872,18 @@ private:
if (curr->ref->type.isNullable()) {
parent.implicitTrap = true;
}
+ switch (curr->order) {
+ case MemoryOrder::Unordered:
+ break;
+ case MemoryOrder::SeqCst:
+ // Synchronizes with other threads.
+ parent.isAtomic = true;
+ break;
+ case MemoryOrder::AcqRel:
+ // Only synchronizes if other threads can read the field.
+ parent.isAtomic = curr->ref->type.getHeapType().isShared();
+ break;
+ }
}
void visitStructSet(StructSet* curr) {
if (curr->ref->type.isNull()) {
@@ -883,6 +895,9 @@ private:
if (curr->ref->type.isNullable()) {
parent.implicitTrap = true;
}
+ if (curr->order != MemoryOrder::Unordered) {
+ parent.isAtomic = true;
+ }
}
void visitArrayNew(ArrayNew* curr) {}
void visitArrayNewData(ArrayNewData* curr) {
diff --git a/src/parser/contexts.h b/src/parser/contexts.h
index 3e0bc7c40..a65299eac 100644
--- a/src/parser/contexts.h
+++ b/src/parser/contexts.h
@@ -735,13 +735,20 @@ struct NullInstrParserCtx {
return Ok{};
}
template<typename HeapTypeT>
- Result<> makeStructGet(
- Index, const std::vector<Annotation>&, HeapTypeT, FieldIdxT, bool) {
+ Result<> makeStructGet(Index,
+ const std::vector<Annotation>&,
+ HeapTypeT,
+ FieldIdxT,
+ bool,
+ MemoryOrder = MemoryOrder::Unordered) {
return Ok{};
}
template<typename HeapTypeT>
- Result<>
- makeStructSet(Index, const std::vector<Annotation>&, HeapTypeT, FieldIdxT) {
+ Result<> makeStructSet(Index,
+ const std::vector<Annotation>&,
+ HeapTypeT,
+ FieldIdxT,
+ MemoryOrder = MemoryOrder::Unordered) {
return Ok{};
}
template<typename HeapTypeT>
@@ -2448,15 +2455,17 @@ struct ParseDefsCtx : TypeParserCtx<ParseDefsCtx> {
const std::vector<Annotation>& annotations,
HeapType type,
Index field,
- bool signed_) {
- return withLoc(pos, irBuilder.makeStructGet(type, field, signed_));
+ bool signed_,
+ MemoryOrder order = MemoryOrder::Unordered) {
+ return withLoc(pos, irBuilder.makeStructGet(type, field, signed_, order));
}
Result<> makeStructSet(Index pos,
const std::vector<Annotation>& annotations,
HeapType type,
- Index field) {
- return withLoc(pos, irBuilder.makeStructSet(type, field));
+ Index field,
+ MemoryOrder order = MemoryOrder::Unordered) {
+ return withLoc(pos, irBuilder.makeStructSet(type, field, order));
}
Result<> makeArrayNew(Index pos,
diff --git a/src/parser/parsers.h b/src/parser/parsers.h
index 2d3321dcd..1f7236403 100644
--- a/src/parser/parsers.h
+++ b/src/parser/parsers.h
@@ -48,6 +48,7 @@ template<typename Ctx> Result<typename Ctx::LimitsT> limits64(Ctx&);
template<typename Ctx> Result<typename Ctx::MemTypeT> memtype(Ctx&);
template<typename Ctx>
Result<typename Ctx::MemTypeT> memtypeContinued(Ctx&, Type addressType);
+template<typename Ctx> Result<MemoryOrder> memorder(Ctx&);
template<typename Ctx> Result<typename Ctx::TableTypeT> tabletype(Ctx&);
template<typename Ctx>
Result<typename Ctx::TableTypeT> tabletypeContinued(Ctx&, Type addressType);
@@ -246,8 +247,15 @@ Result<> makeStructGet(Ctx&,
const std::vector<Annotation>&,
bool signed_ = false);
template<typename Ctx>
+Result<> makeAtomicStructGet(Ctx&,
+ Index,
+ const std::vector<Annotation>&,
+ bool signed_ = false);
+template<typename Ctx>
Result<> makeStructSet(Ctx&, Index, const std::vector<Annotation>&);
template<typename Ctx>
+Result<> makeAtomicStructSet(Ctx&, Index, const std::vector<Annotation>&);
+template<typename Ctx>
Result<>
makeArrayNew(Ctx&, Index, const std::vector<Annotation>&, bool default_);
template<typename Ctx>
@@ -801,6 +809,17 @@ Result<typename Ctx::MemTypeT> memtypeContinued(Ctx& ctx, Type addressType) {
return ctx.makeMemType(addressType, *limits, shared);
}
+// memorder ::= '' | 'seqcst' | 'acqrel'  ('' means seqcst, the default)
+template<typename Ctx> Result<MemoryOrder> memorder(Ctx& ctx) {
+ if (ctx.in.takeKeyword("seqcst"sv)) {
+ return MemoryOrder::SeqCst;
+ }
+ if (ctx.in.takeKeyword("acqrel"sv)) {
+ return MemoryOrder::AcqRel;
+ }
+ return MemoryOrder::SeqCst;
+}
+
// tabletype ::= (limits32 | 'i32' limits32 | 'i64' limit64) reftype
template<typename Ctx> Result<typename Ctx::TableTypeT> tabletype(Ctx& ctx) {
Type addressType = Type::i32;
@@ -2225,6 +2244,20 @@ Result<> makeStructGet(Ctx& ctx,
}
template<typename Ctx>
+Result<> makeAtomicStructGet(Ctx& ctx,
+ Index pos,
+ const std::vector<Annotation>& annotations,
+ bool signed_) {
+ auto order = memorder(ctx);
+ CHECK_ERR(order);
+ auto type = typeidx(ctx);
+ CHECK_ERR(type);
+ auto field = fieldidx(ctx, *type);
+ CHECK_ERR(field);
+ return ctx.makeStructGet(pos, annotations, *type, *field, signed_, *order);
+}
+
+template<typename Ctx>
Result<>
makeStructSet(Ctx& ctx, Index pos, const std::vector<Annotation>& annotations) {
auto type = typeidx(ctx);
@@ -2235,6 +2268,19 @@ makeStructSet(Ctx& ctx, Index pos, const std::vector<Annotation>& annotations) {
}
template<typename Ctx>
+Result<> makeAtomicStructSet(Ctx& ctx,
+ Index pos,
+ const std::vector<Annotation>& annotations) {
+ auto order = memorder(ctx);
+ CHECK_ERR(order);
+ auto type = typeidx(ctx);
+ CHECK_ERR(type);
+ auto field = fieldidx(ctx, *type);
+ CHECK_ERR(field);
+ return ctx.makeStructSet(pos, annotations, *type, *field, *order);
+}
+
+template<typename Ctx>
Result<> makeArrayNew(Ctx& ctx,
Index pos,
const std::vector<Annotation>& annotations,
diff --git a/src/passes/OptimizeInstructions.cpp b/src/passes/OptimizeInstructions.cpp
index 792cf6235..6a528d74f 100644
--- a/src/passes/OptimizeInstructions.cpp
+++ b/src/passes/OptimizeInstructions.cpp
@@ -1831,6 +1831,12 @@ struct OptimizeInstructions
void visitStructGet(StructGet* curr) {
skipNonNullCast(curr->ref, curr);
trapOnNull(curr, curr->ref);
+ // Relax acquire loads of unshared fields to unordered because they cannot
+ // synchronize with other threads.
+ if (curr->order == MemoryOrder::AcqRel && curr->ref->type.isRef() &&
+ !curr->ref->type.getHeapType().isShared()) {
+ curr->order = MemoryOrder::Unordered;
+ }
}
void visitStructSet(StructSet* curr) {
@@ -1847,6 +1853,13 @@ struct OptimizeInstructions
optimizeStoredValue(curr->value, fields[curr->index].getByteSize());
}
}
+
+ // Relax release stores of unshared fields to unordered because they cannot
+ // synchronize with other threads.
+ if (curr->order == MemoryOrder::AcqRel && curr->ref->type.isRef() &&
+ !curr->ref->type.getHeapType().isShared()) {
+ curr->order = MemoryOrder::Unordered;
+ }
}
void visitArrayNew(ArrayNew* curr) {
diff --git a/src/passes/Precompute.cpp b/src/passes/Precompute.cpp
index 0fc0753ae..93f2f1d69 100644
--- a/src/passes/Precompute.cpp
+++ b/src/passes/Precompute.cpp
@@ -134,23 +134,37 @@ public:
}
Flow visitStructSet(StructSet* curr) { return Flow(NONCONSTANT_FLOW); }
Flow visitStructGet(StructGet* curr) {
- if (curr->ref->type != Type::unreachable && !curr->ref->type.isNull()) {
- // If this field is immutable then we may be able to precompute this, as
- // if we also created the data in this function (or it was created in an
- // immutable global) then we know the value in the field. If it is
- // immutable, call the super method which will do the rest here. That
- // includes checking for the data being properly created, as if it was
- // not then we will not have a constant value for it, which means the
- // local.get of that value will stop us.
- auto& field =
- curr->ref->type.getHeapType().getStruct().fields[curr->index];
- if (field.mutable_ == Immutable) {
- return Super::visitStructGet(curr);
- }
+ if (curr->ref->type == Type::unreachable || curr->ref->type.isNull()) {
+ return Flow(NONCONSTANT_FLOW);
}
-
- // Otherwise, we've failed to precompute.
- return Flow(NONCONSTANT_FLOW);
+ switch (curr->order) {
+ case MemoryOrder::Unordered:
+ // This can always be precomputed.
+ break;
+ case MemoryOrder::SeqCst:
+ // This can never be precomputed away because it synchronizes with other
+ // threads.
+ return Flow(NONCONSTANT_FLOW);
+ case MemoryOrder::AcqRel:
+ // This synchronizes only with writes to the same data, so it can still
+ // be precomputed if the data is not shared with other threads.
+ if (curr->ref->type.getHeapType().isShared()) {
+ return Flow(NONCONSTANT_FLOW);
+ }
+ break;
+ }
+ // If this field is immutable then we may be able to precompute this, as
+ // if we also created the data in this function (or it was created in an
+ // immutable global) then we know the value in the field. If it is
+ // immutable, call the super method which will do the rest here. That
+ // includes checking for the data being properly created, as if it was
+ // not then we will not have a constant value for it, which means the
+ // local.get of that value will stop us.
+ auto& field = curr->ref->type.getHeapType().getStruct().fields[curr->index];
+ if (field.mutable_ == Mutable) {
+ return Flow(NONCONSTANT_FLOW);
+ }
+ return Super::visitStructGet(curr);
}
Flow visitArrayNew(ArrayNew* curr) {
auto flow = Super::visitArrayNew(curr);
diff --git a/src/passes/Print.cpp b/src/passes/Print.cpp
index 5f2d1cc3d..d70034c85 100644
--- a/src/passes/Print.cpp
+++ b/src/passes/Print.cpp
@@ -2276,24 +2276,47 @@ struct PrintExpressionContents
o << index;
}
}
+ void printMemoryOrder(MemoryOrder order) {
+ switch (order) {
+      // Unordered uses a different base instruction, so there is nothing to
+      // print for it. For SeqCst we could print "seqcst" explicitly, but we
+      // omit it for more concise output.
+ case MemoryOrder::Unordered:
+ case MemoryOrder::SeqCst:
+ break;
+ case MemoryOrder::AcqRel:
+ o << "acqrel ";
+ break;
+ }
+ }
void visitStructGet(StructGet* curr) {
auto heapType = curr->ref->type.getHeapType();
const auto& field = heapType.getStruct().fields[curr->index];
+ printMedium(o, "struct");
+ if (curr->order != MemoryOrder::Unordered) {
+ printMedium(o, ".atomic");
+ }
if (field.type == Type::i32 && field.packedType != Field::not_packed) {
if (curr->signed_) {
- printMedium(o, "struct.get_s ");
+ printMedium(o, ".get_s ");
} else {
- printMedium(o, "struct.get_u ");
+ printMedium(o, ".get_u ");
}
} else {
- printMedium(o, "struct.get ");
+ printMedium(o, ".get ");
}
+ printMemoryOrder(curr->order);
printHeapType(heapType);
o << ' ';
printFieldName(heapType, curr->index);
}
void visitStructSet(StructSet* curr) {
- printMedium(o, "struct.set ");
+ if (curr->order == MemoryOrder::Unordered) {
+ printMedium(o, "struct.set ");
+ } else {
+ printMedium(o, "struct.atomic.set ");
+ }
+ printMemoryOrder(curr->order);
auto heapType = curr->ref->type.getHeapType();
printHeapType(heapType);
o << ' ';
diff --git a/src/wasm-binary.h b/src/wasm-binary.h
index bece0af8e..7d98302ba 100644
--- a/src/wasm-binary.h
+++ b/src/wasm-binary.h
@@ -1125,6 +1125,15 @@ enum ASTNodes {
I31GetU = 0x1e,
RefI31Shared = 0x1f,
+ // Shared GC Opcodes
+
+ OrderSeqCst = 0x0,
+ OrderAcqRel = 0x1,
+ StructAtomicGet = 0x5c,
+ StructAtomicGetS = 0x5d,
+ StructAtomicGetU = 0x5e,
+ StructAtomicSet = 0x5f,
+
// stringref opcodes
StringConst = 0x82,
@@ -1352,6 +1361,8 @@ public:
void writeField(const Field& field);
+ void writeMemoryOrder(MemoryOrder order);
+
private:
Module* wasm;
BufferWithRandomAccess& o;
@@ -1587,6 +1598,7 @@ public:
Index readMemoryAccess(Address& alignment, Address& offset);
std::tuple<Name, Address, Address> getMemarg();
+ MemoryOrder getMemoryOrder();
[[noreturn]] void throwError(std::string text) {
throw ParseException(text, 0, pos);
diff --git a/src/wasm-builder.h b/src/wasm-builder.h
index 20485f14d..4396bc6df 100644
--- a/src/wasm-builder.h
+++ b/src/wasm-builder.h
@@ -936,21 +936,29 @@ public:
ret->finalize();
return ret;
}
- StructGet*
- makeStructGet(Index index, Expression* ref, Type type, bool signed_ = false) {
+ StructGet* makeStructGet(Index index,
+ Expression* ref,
+ Type type,
+ bool signed_ = false,
+ MemoryOrder order = MemoryOrder::Unordered) {
auto* ret = wasm.allocator.alloc<StructGet>();
ret->index = index;
ret->ref = ref;
ret->type = type;
ret->signed_ = signed_;
+ ret->order = order;
ret->finalize();
return ret;
}
- StructSet* makeStructSet(Index index, Expression* ref, Expression* value) {
+ StructSet* makeStructSet(Index index,
+ Expression* ref,
+ Expression* value,
+ MemoryOrder order = MemoryOrder::Unordered) {
auto* ret = wasm.allocator.alloc<StructSet>();
ret->index = index;
ret->ref = ref;
ret->value = value;
+ ret->order = order;
ret->finalize();
return ret;
}
diff --git a/src/wasm-delegations-fields.def b/src/wasm-delegations-fields.def
index 3be040220..e883763a4 100644
--- a/src/wasm-delegations-fields.def
+++ b/src/wasm-delegations-fields.def
@@ -639,12 +639,14 @@ DELEGATE_FIELD_CASE_START(StructGet)
DELEGATE_FIELD_INT(StructGet, index)
DELEGATE_FIELD_CHILD(StructGet, ref)
DELEGATE_FIELD_INT(StructGet, signed_)
+DELEGATE_FIELD_INT(StructGet, order)
DELEGATE_FIELD_CASE_END(StructGet)
DELEGATE_FIELD_CASE_START(StructSet)
DELEGATE_FIELD_INT(StructSet, index)
DELEGATE_FIELD_CHILD(StructSet, value)
DELEGATE_FIELD_CHILD(StructSet, ref)
+DELEGATE_FIELD_INT(StructSet, order)
DELEGATE_FIELD_CASE_END(StructSet)
DELEGATE_FIELD_CASE_START(ArrayNew)
diff --git a/src/wasm-ir-builder.h b/src/wasm-ir-builder.h
index 250d5d17c..a40e8df82 100644
--- a/src/wasm-ir-builder.h
+++ b/src/wasm-ir-builder.h
@@ -204,8 +204,13 @@ public:
makeBrOn(Index label, BrOnOp op, Type in = Type::none, Type out = Type::none);
Result<> makeStructNew(HeapType type);
Result<> makeStructNewDefault(HeapType type);
- Result<> makeStructGet(HeapType type, Index field, bool signed_);
- Result<> makeStructSet(HeapType type, Index field);
+ Result<> makeStructGet(HeapType type,
+ Index field,
+ bool signed_,
+ MemoryOrder order = MemoryOrder::Unordered);
+ Result<> makeStructSet(HeapType type,
+ Index field,
+ MemoryOrder order = MemoryOrder::Unordered);
Result<> makeArrayNew(HeapType type);
Result<> makeArrayNewDefault(HeapType type);
Result<> makeArrayNewData(HeapType type, Name data);
diff --git a/src/wasm.h b/src/wasm.h
index b3ae82bcf..3f60a67d2 100644
--- a/src/wasm.h
+++ b/src/wasm.h
@@ -65,6 +65,12 @@ struct Address {
}
};
+enum class MemoryOrder {
+ Unordered,
+ SeqCst,
+ AcqRel,
+};
+
enum class IRProfile { Normal, Poppy };
// Operators
@@ -1652,6 +1658,7 @@ public:
Expression* ref;
// Packed fields have a sign.
bool signed_ = false;
+ MemoryOrder order = MemoryOrder::Unordered;
void finalize();
};
@@ -1664,6 +1671,7 @@ public:
Index index;
Expression* ref;
Expression* value;
+ MemoryOrder order = MemoryOrder::Unordered;
void finalize();
};
diff --git a/src/wasm/wasm-binary.cpp b/src/wasm/wasm-binary.cpp
index 791dc53d7..b0c5a54ac 100644
--- a/src/wasm/wasm-binary.cpp
+++ b/src/wasm/wasm-binary.cpp
@@ -1737,6 +1737,20 @@ void WasmBinaryWriter::writeField(const Field& field) {
o << U32LEB(field.mutable_);
}
+void WasmBinaryWriter::writeMemoryOrder(MemoryOrder order) {
+ switch (order) {
+ case MemoryOrder::Unordered:
+ break;
+ case MemoryOrder::SeqCst:
+ o << uint8_t(BinaryConsts::OrderSeqCst);
+ return;
+ case MemoryOrder::AcqRel:
+ o << uint8_t(BinaryConsts::OrderAcqRel);
+ return;
+ }
+ WASM_UNREACHABLE("unexpected memory order");
+}
+
// reader
WasmBinaryReader::WasmBinaryReader(Module& wasm,
@@ -3406,6 +3420,21 @@ Result<> WasmBinaryReader::readInst() {
return Err{"expected 0x00 byte immediate on atomic.fence"};
}
return builder.makeAtomicFence();
+ case BinaryConsts::StructAtomicGet:
+ case BinaryConsts::StructAtomicGetS:
+ case BinaryConsts::StructAtomicGetU: {
+ auto order = getMemoryOrder();
+ auto type = getIndexedHeapType();
+ auto field = getU32LEB();
+ bool signed_ = op == BinaryConsts::StructAtomicGetS;
+ return builder.makeStructGet(type, field, signed_, order);
+ }
+ case BinaryConsts::StructAtomicSet: {
+ auto order = getMemoryOrder();
+ auto type = getIndexedHeapType();
+ auto field = getU32LEB();
+ return builder.makeStructSet(type, field, order);
+ }
}
return Err{"unknown atomic operation"};
}
@@ -4952,4 +4981,15 @@ std::tuple<Name, Address, Address> WasmBinaryReader::getMemarg() {
return {getMemoryName(memIdx), alignment, offset};
}
+MemoryOrder WasmBinaryReader::getMemoryOrder() {
+ auto code = getInt8();
+ switch (code) {
+ case BinaryConsts::OrderSeqCst:
+ return MemoryOrder::SeqCst;
+ case BinaryConsts::OrderAcqRel:
+ return MemoryOrder::AcqRel;
+ }
+ throwError("Unrecognized memory order code " + std::to_string(code));
+}
+
} // namespace wasm
diff --git a/src/wasm/wasm-ir-builder.cpp b/src/wasm/wasm-ir-builder.cpp
index 6cd62e439..4b0342410 100644
--- a/src/wasm/wasm-ir-builder.cpp
+++ b/src/wasm/wasm-ir-builder.cpp
@@ -1792,21 +1792,26 @@ Result<> IRBuilder::makeStructNewDefault(HeapType type) {
return Ok{};
}
-Result<> IRBuilder::makeStructGet(HeapType type, Index field, bool signed_) {
+Result<> IRBuilder::makeStructGet(HeapType type,
+ Index field,
+ bool signed_,
+ MemoryOrder order) {
const auto& fields = type.getStruct().fields;
StructGet curr;
CHECK_ERR(ChildPopper{*this}.visitStructGet(&curr, type));
CHECK_ERR(validateTypeAnnotation(type, curr.ref));
- push(builder.makeStructGet(field, curr.ref, fields[field].type, signed_));
+ push(
+ builder.makeStructGet(field, curr.ref, fields[field].type, signed_, order));
return Ok{};
}
-Result<> IRBuilder::makeStructSet(HeapType type, Index field) {
+Result<>
+IRBuilder::makeStructSet(HeapType type, Index field, MemoryOrder order) {
StructSet curr;
curr.index = field;
CHECK_ERR(ChildPopper{*this}.visitStructSet(&curr, type));
CHECK_ERR(validateTypeAnnotation(type, curr.ref));
- push(builder.makeStructSet(field, curr.ref, curr.value));
+ push(builder.makeStructSet(field, curr.ref, curr.value, order));
return Ok{};
}
diff --git a/src/wasm/wasm-stack.cpp b/src/wasm/wasm-stack.cpp
index 61f59c76a..08043b27f 100644
--- a/src/wasm/wasm-stack.cpp
+++ b/src/wasm/wasm-stack.cpp
@@ -2327,15 +2327,20 @@ void BinaryInstWriter::visitStructGet(StructGet* curr) {
}
const auto& heapType = curr->ref->type.getHeapType();
const auto& field = heapType.getStruct().fields[curr->index];
+ bool atomic = curr->order != MemoryOrder::Unordered;
int8_t op;
if (field.type != Type::i32 || field.packedType == Field::not_packed) {
- op = BinaryConsts::StructGet;
+ op = atomic ? BinaryConsts::StructAtomicGet : BinaryConsts::StructGet;
} else if (curr->signed_) {
- op = BinaryConsts::StructGetS;
+ op = atomic ? BinaryConsts::StructAtomicGetS : BinaryConsts::StructGetS;
} else {
- op = BinaryConsts::StructGetU;
+ op = atomic ? BinaryConsts::StructAtomicGetU : BinaryConsts::StructGetU;
+ }
+ auto prefix = atomic ? BinaryConsts::AtomicPrefix : BinaryConsts::GCPrefix;
+ o << int8_t(prefix) << U32LEB(op);
+ if (atomic) {
+ parent.writeMemoryOrder(curr->order);
}
- o << int8_t(BinaryConsts::GCPrefix) << U32LEB(op);
parent.writeIndexedHeapType(heapType);
o << U32LEB(curr->index);
}
@@ -2345,7 +2350,13 @@ void BinaryInstWriter::visitStructSet(StructSet* curr) {
emitUnreachable();
return;
}
- o << int8_t(BinaryConsts::GCPrefix) << U32LEB(BinaryConsts::StructSet);
+ if (curr->order == MemoryOrder::Unordered) {
+ o << int8_t(BinaryConsts::GCPrefix) << U32LEB(BinaryConsts::StructSet);
+ } else {
+ o << int8_t(BinaryConsts::AtomicPrefix)
+ << U32LEB(BinaryConsts::StructAtomicSet);
+ parent.writeMemoryOrder(curr->order);
+ }
parent.writeIndexedHeapType(curr->ref->type.getHeapType());
o << U32LEB(curr->index);
}
diff --git a/src/wasm/wasm-validator.cpp b/src/wasm/wasm-validator.cpp
index 242e07c43..7de69a1ff 100644
--- a/src/wasm/wasm-validator.cpp
+++ b/src/wasm/wasm-validator.cpp
@@ -2989,6 +2989,11 @@ void FunctionValidator::visitStructGet(StructGet* curr) {
shouldBeTrue(getModule()->features.hasGC(),
curr,
"struct.get requires gc [--enable-gc]");
+ shouldBeTrue(curr->order == MemoryOrder::Unordered ||
+ getModule()->features.hasSharedEverything(),
+ curr,
+ "struct.atomic.get requires shared-everything "
+ "[--enable-shared-everything]");
if (curr->type == Type::unreachable || curr->ref->type.isNull()) {
return;
}
@@ -3016,6 +3021,11 @@ void FunctionValidator::visitStructSet(StructSet* curr) {
shouldBeTrue(getModule()->features.hasGC(),
curr,
"struct.set requires gc [--enable-gc]");
+ shouldBeTrue(curr->order == MemoryOrder::Unordered ||
+ getModule()->features.hasSharedEverything(),
+ curr,
+ "struct.atomic.set requires shared-everything "
+ "[--enable-shared-everything]");
if (curr->ref->type == Type::unreachable) {
return;
}
diff --git a/test/lit/basic/gc-atomics.wast b/test/lit/basic/gc-atomics.wast
new file mode 100644
index 000000000..c454b4c99
--- /dev/null
+++ b/test/lit/basic/gc-atomics.wast
@@ -0,0 +1,149 @@
+;; NOTE: Assertions have been generated by update_lit_checks.py and should not be edited.
+
+;; RUN: wasm-opt -all %s -S -o - | filecheck %s
+;; RUN: wasm-opt -all %s --roundtrip -S -o - | filecheck %s
+
+(module
+ ;; CHECK: (type $struct (struct (field (mut i32))))
+ (type $struct (struct (field (mut i32))))
+ ;; CHECK: (type $packed (struct (field (mut i8))))
+ (type $packed (struct (field (mut i8))))
+
+ ;; CHECK: (func $get (type $3) (param $0 (ref null $struct)) (result i32)
+ ;; CHECK-NEXT: (struct.atomic.get $struct 0
+ ;; CHECK-NEXT: (local.get $0)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $get (param (ref null $struct)) (result i32)
+ (struct.atomic.get $struct 0
+ (local.get 0)
+ )
+ )
+
+ ;; CHECK: (func $get-seqcst (type $3) (param $0 (ref null $struct)) (result i32)
+ ;; CHECK-NEXT: (struct.atomic.get $struct 0
+ ;; CHECK-NEXT: (local.get $0)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $get-seqcst (param (ref null $struct)) (result i32)
+ (struct.atomic.get seqcst $struct 0
+ (local.get 0)
+ )
+ )
+
+ ;; CHECK: (func $get-acqrel (type $3) (param $0 (ref null $struct)) (result i32)
+ ;; CHECK-NEXT: (struct.atomic.get acqrel $struct 0
+ ;; CHECK-NEXT: (local.get $0)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $get-acqrel (param (ref null $struct)) (result i32)
+ (struct.atomic.get acqrel $struct 0
+ (local.get 0)
+ )
+ )
+
+ ;; CHECK: (func $get-s (type $2) (param $0 (ref null $packed)) (result i32)
+ ;; CHECK-NEXT: (struct.atomic.get_s $packed 0
+ ;; CHECK-NEXT: (local.get $0)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $get-s (param (ref null $packed)) (result i32)
+ (struct.atomic.get_s $packed 0
+ (local.get 0)
+ )
+ )
+
+ ;; CHECK: (func $get-s-seqcst (type $2) (param $0 (ref null $packed)) (result i32)
+ ;; CHECK-NEXT: (struct.atomic.get_s $packed 0
+ ;; CHECK-NEXT: (local.get $0)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $get-s-seqcst (param (ref null $packed)) (result i32)
+ (struct.atomic.get_s seqcst $packed 0
+ (local.get 0)
+ )
+ )
+
+ ;; CHECK: (func $get-s-acqrel (type $2) (param $0 (ref null $packed)) (result i32)
+ ;; CHECK-NEXT: (struct.atomic.get_s acqrel $packed 0
+ ;; CHECK-NEXT: (local.get $0)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $get-s-acqrel (param (ref null $packed)) (result i32)
+ (struct.atomic.get_s acqrel $packed 0
+ (local.get 0)
+ )
+ )
+
+ ;; CHECK: (func $get-u (type $2) (param $0 (ref null $packed)) (result i32)
+ ;; CHECK-NEXT: (struct.atomic.get_u $packed 0
+ ;; CHECK-NEXT: (local.get $0)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $get-u (param (ref null $packed)) (result i32)
+ (struct.atomic.get_u $packed 0
+ (local.get 0)
+ )
+ )
+
+ ;; CHECK: (func $get-u-seqcst (type $2) (param $0 (ref null $packed)) (result i32)
+ ;; CHECK-NEXT: (struct.atomic.get_u $packed 0
+ ;; CHECK-NEXT: (local.get $0)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $get-u-seqcst (param (ref null $packed)) (result i32)
+ (struct.atomic.get_u seqcst $packed 0
+ (local.get 0)
+ )
+ )
+
+ ;; CHECK: (func $get-u-acqrel (type $2) (param $0 (ref null $packed)) (result i32)
+ ;; CHECK-NEXT: (struct.atomic.get_u acqrel $packed 0
+ ;; CHECK-NEXT: (local.get $0)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $get-u-acqrel (param (ref null $packed)) (result i32)
+ (struct.atomic.get_u acqrel $packed 0
+ (local.get 0)
+ )
+ )
+
+ ;; CHECK: (func $set (type $4) (param $0 (ref null $struct))
+ ;; CHECK-NEXT: (struct.atomic.set $struct 0
+ ;; CHECK-NEXT: (local.get $0)
+ ;; CHECK-NEXT: (i32.const 0)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $set (param (ref null $struct))
+ (struct.atomic.set $struct 0
+ (local.get 0)
+ (i32.const 0)
+ )
+ )
+
+ ;; CHECK: (func $set-seqcst (type $4) (param $0 (ref null $struct))
+ ;; CHECK-NEXT: (struct.atomic.set $struct 0
+ ;; CHECK-NEXT: (local.get $0)
+ ;; CHECK-NEXT: (i32.const 0)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $set-seqcst (param (ref null $struct))
+ (struct.atomic.set seqcst $struct 0
+ (local.get 0)
+ (i32.const 0)
+ )
+ )
+
+ ;; CHECK: (func $set-acqrel (type $4) (param $0 (ref null $struct))
+ ;; CHECK-NEXT: (struct.atomic.set acqrel $struct 0
+ ;; CHECK-NEXT: (local.get $0)
+ ;; CHECK-NEXT: (i32.const 0)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $set-acqrel (param (ref null $struct))
+ (struct.atomic.set acqrel $struct 0
+ (local.get 0)
+ (i32.const 0)
+ )
+ )
+)
diff --git a/test/lit/passes/optimize-instructions-gc-atomics.wast b/test/lit/passes/optimize-instructions-gc-atomics.wast
new file mode 100644
index 000000000..a0283390c
--- /dev/null
+++ b/test/lit/passes/optimize-instructions-gc-atomics.wast
@@ -0,0 +1,157 @@
+;; NOTE: Assertions have been generated by update_lit_checks.py and should not be edited.
+
+;; RUN: wasm-opt %s -all --optimize-instructions -S -o - | filecheck %s
+
+(module
+ ;; CHECK: (type $unshared (struct (field (mut i32))))
+
+ ;; CHECK: (type $shared (shared (struct (field (mut i32)))))
+ (type $shared (shared (struct (field (mut i32)))))
+ (type $unshared (struct (field (mut i32))))
+
+ ;; CHECK: (func $get-unordered-unshared (type $2) (result i32)
+ ;; CHECK-NEXT: (struct.get $unshared 0
+ ;; CHECK-NEXT: (struct.new_default $unshared)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $get-unordered-unshared (result i32)
+ (struct.get $unshared 0
+ (struct.new_default $unshared)
+ )
+ )
+
+ ;; CHECK: (func $get-unordered-shared (type $2) (result i32)
+ ;; CHECK-NEXT: (struct.get $shared 0
+ ;; CHECK-NEXT: (struct.new_default $shared)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $get-unordered-shared (result i32)
+ (struct.get $shared 0
+ (struct.new_default $shared)
+ )
+ )
+
+ ;; CHECK: (func $get-seqcst-unshared (type $2) (result i32)
+ ;; CHECK-NEXT: (struct.atomic.get $unshared 0
+ ;; CHECK-NEXT: (struct.new_default $unshared)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $get-seqcst-unshared (result i32)
+ (struct.atomic.get seqcst $unshared 0
+ (struct.new_default $unshared)
+ )
+ )
+
+ ;; CHECK: (func $get-seqcst-shared (type $2) (result i32)
+ ;; CHECK-NEXT: (struct.atomic.get $shared 0
+ ;; CHECK-NEXT: (struct.new_default $shared)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $get-seqcst-shared (result i32)
+ (struct.atomic.get seqcst $shared 0
+ (struct.new_default $shared)
+ )
+ )
+
+ ;; CHECK: (func $get-acqrel-unshared (type $2) (result i32)
+ ;; CHECK-NEXT: (struct.get $unshared 0
+ ;; CHECK-NEXT: (struct.new_default $unshared)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $get-acqrel-unshared (result i32)
+    ;; This can be relaxed to unordered.
+ (struct.atomic.get acqrel $unshared 0
+ (struct.new_default $unshared)
+ )
+ )
+
+ ;; CHECK: (func $get-acqrel-shared (type $2) (result i32)
+ ;; CHECK-NEXT: (struct.atomic.get acqrel $shared 0
+ ;; CHECK-NEXT: (struct.new_default $shared)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $get-acqrel-shared (result i32)
+ (struct.atomic.get acqrel $shared 0
+ (struct.new_default $shared)
+ )
+ )
+
+ ;; CHECK: (func $set-unordered-unshared (type $3)
+ ;; CHECK-NEXT: (struct.set $unshared 0
+ ;; CHECK-NEXT: (struct.new_default $unshared)
+ ;; CHECK-NEXT: (i32.const 0)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $set-unordered-unshared
+ (struct.set $unshared 0
+ (struct.new_default $unshared)
+ (i32.const 0)
+ )
+ )
+
+ ;; CHECK: (func $set-unordered-shared (type $3)
+ ;; CHECK-NEXT: (struct.set $shared 0
+ ;; CHECK-NEXT: (struct.new_default $shared)
+ ;; CHECK-NEXT: (i32.const 0)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $set-unordered-shared
+ (struct.set $shared 0
+ (struct.new_default $shared)
+ (i32.const 0)
+ )
+ )
+
+ ;; CHECK: (func $set-seqcst-unshared (type $3)
+ ;; CHECK-NEXT: (struct.atomic.set $unshared 0
+ ;; CHECK-NEXT: (struct.new_default $unshared)
+ ;; CHECK-NEXT: (i32.const 0)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $set-seqcst-unshared
+ (struct.atomic.set seqcst $unshared 0
+ (struct.new_default $unshared)
+ (i32.const 0)
+ )
+ )
+
+ ;; CHECK: (func $set-seqcst-shared (type $3)
+ ;; CHECK-NEXT: (struct.atomic.set $shared 0
+ ;; CHECK-NEXT: (struct.new_default $shared)
+ ;; CHECK-NEXT: (i32.const 0)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $set-seqcst-shared
+ (struct.atomic.set seqcst $shared 0
+ (struct.new_default $shared)
+ (i32.const 0)
+ )
+ )
+
+ ;; CHECK: (func $set-acqrel-unshared (type $3)
+ ;; CHECK-NEXT: (struct.set $unshared 0
+ ;; CHECK-NEXT: (struct.new_default $unshared)
+ ;; CHECK-NEXT: (i32.const 0)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $set-acqrel-unshared
+ ;; This can be relaxed to unordered.
+ (struct.atomic.set acqrel $unshared 0
+ (struct.new_default $unshared)
+ (i32.const 0)
+ )
+ )
+
+ ;; CHECK: (func $set-acqrel-shared (type $3)
+ ;; CHECK-NEXT: (struct.atomic.set acqrel $shared 0
+ ;; CHECK-NEXT: (struct.new_default $shared)
+ ;; CHECK-NEXT: (i32.const 0)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $set-acqrel-shared
+ (struct.atomic.set acqrel $shared 0
+ (struct.new_default $shared)
+ (i32.const 0)
+ )
+ )
+)
diff --git a/test/lit/passes/precompute-gc-atomics.wast b/test/lit/passes/precompute-gc-atomics.wast
new file mode 100644
index 000000000..1f2d07753
--- /dev/null
+++ b/test/lit/passes/precompute-gc-atomics.wast
@@ -0,0 +1,72 @@
+;; NOTE: Assertions have been generated by update_lit_checks.py and should not be edited.
+
+;; RUN: wasm-opt %s -all --precompute-propagate -S -o - | filecheck %s
+
+(module
+ ;; CHECK: (type $shared (shared (struct (field i32))))
+ (type $shared (shared (struct (field i32))))
+ ;; CHECK: (type $unshared (struct (field i32)))
+ (type $unshared (struct (field i32)))
+
+ ;; CHECK: (func $get-unordered-unshared (type $0) (result i32)
+ ;; CHECK-NEXT: (i32.const 0)
+ ;; CHECK-NEXT: )
+ (func $get-unordered-unshared (result i32)
+ (struct.get $unshared 0
+ (struct.new_default $unshared)
+ )
+ )
+
+ ;; CHECK: (func $get-unordered-shared (type $0) (result i32)
+ ;; CHECK-NEXT: (i32.const 0)
+ ;; CHECK-NEXT: )
+ (func $get-unordered-shared (result i32)
+ (struct.get $shared 0
+ (struct.new_default $shared)
+ )
+ )
+
+ ;; CHECK: (func $get-seqcst-unshared (type $0) (result i32)
+ ;; CHECK-NEXT: (struct.atomic.get $unshared 0
+ ;; CHECK-NEXT: (struct.new_default $unshared)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $get-seqcst-unshared (result i32)
+ (struct.atomic.get seqcst $unshared 0
+ (struct.new_default $unshared)
+ )
+ )
+
+ ;; CHECK: (func $get-seqcst-shared (type $0) (result i32)
+ ;; CHECK-NEXT: (struct.atomic.get $shared 0
+ ;; CHECK-NEXT: (struct.new_default $shared)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $get-seqcst-shared (result i32)
+ (struct.atomic.get seqcst $shared 0
+ (struct.new_default $shared)
+ )
+ )
+
+ ;; CHECK: (func $get-acqrel-unshared (type $0) (result i32)
+ ;; CHECK-NEXT: (i32.const 0)
+ ;; CHECK-NEXT: )
+ (func $get-acqrel-unshared (result i32)
+ ;; We can optimize this because acquire-release on unshared data does not
+ ;; synchronize with anything.
+ (struct.atomic.get acqrel $unshared 0
+ (struct.new_default $unshared)
+ )
+ )
+
+ ;; CHECK: (func $get-acqrel-shared (type $0) (result i32)
+ ;; CHECK-NEXT: (struct.atomic.get acqrel $shared 0
+ ;; CHECK-NEXT: (struct.new_default $shared)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $get-acqrel-shared (result i32)
+ (struct.atomic.get acqrel $shared 0
+ (struct.new_default $shared)
+ )
+ )
+)
diff --git a/test/lit/passes/vacuum-gc-atomics.wast b/test/lit/passes/vacuum-gc-atomics.wast
new file mode 100644
index 000000000..49a8a8a6f
--- /dev/null
+++ b/test/lit/passes/vacuum-gc-atomics.wast
@@ -0,0 +1,91 @@
+;; NOTE: Assertions have been generated by update_lit_checks.py and should not be edited.
+
+;; Check that synchronizing operations are considered to have side effects that
+;; prevent them from being dropped.
+
+;; RUN: wasm-opt %s -all --vacuum -S -o - | filecheck %s
+
+(module
+ ;; CHECK: (type $shared (shared (struct (field i32))))
+ (type $shared (shared (struct (field i32))))
+ ;; CHECK: (type $unshared (struct (field i32)))
+ (type $unshared (struct (field i32)))
+
+ ;; CHECK: (func $get-unordered-unshared (type $0)
+ ;; CHECK-NEXT: (nop)
+ ;; CHECK-NEXT: )
+ (func $get-unordered-unshared
+ (drop
+ (struct.get $unshared 0
+ (struct.new_default $unshared)
+ )
+ )
+ )
+
+ ;; CHECK: (func $get-unordered-shared (type $0)
+ ;; CHECK-NEXT: (nop)
+ ;; CHECK-NEXT: )
+ (func $get-unordered-shared
+ (drop
+ (struct.get $shared 0
+ (struct.new_default $shared)
+ )
+ )
+ )
+
+ ;; CHECK: (func $get-seqcst-unshared (type $0)
+ ;; CHECK-NEXT: (drop
+ ;; CHECK-NEXT: (struct.atomic.get $unshared 0
+ ;; CHECK-NEXT: (struct.new_default $unshared)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $get-seqcst-unshared
+ (drop
+ (struct.atomic.get seqcst $unshared 0
+ (struct.new_default $unshared)
+ )
+ )
+ )
+
+ ;; CHECK: (func $get-seqcst-shared (type $0)
+ ;; CHECK-NEXT: (drop
+ ;; CHECK-NEXT: (struct.atomic.get $shared 0
+ ;; CHECK-NEXT: (struct.new_default $shared)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $get-seqcst-shared
+ (drop
+ (struct.atomic.get seqcst $shared 0
+ (struct.new_default $shared)
+ )
+ )
+ )
+
+ ;; CHECK: (func $get-acqrel-unshared (type $0)
+ ;; CHECK-NEXT: (nop)
+ ;; CHECK-NEXT: )
+ (func $get-acqrel-unshared
+ (drop
+ (struct.atomic.get acqrel $unshared 0
+ (struct.new_default $unshared)
+ )
+ )
+ )
+
+ ;; CHECK: (func $get-acqrel-shared (type $0)
+ ;; CHECK-NEXT: (drop
+ ;; CHECK-NEXT: (struct.atomic.get acqrel $shared 0
+ ;; CHECK-NEXT: (struct.new_default $shared)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $get-acqrel-shared
+ (drop
+ (struct.atomic.get acqrel $shared 0
+ (struct.new_default $shared)
+ )
+ )
+ )
+)
diff --git a/test/lit/validation/gc-atomics.wast b/test/lit/validation/gc-atomics.wast
new file mode 100644
index 000000000..28e98b9fe
--- /dev/null
+++ b/test/lit/validation/gc-atomics.wast
@@ -0,0 +1,38 @@
+;; Test that shared-everything GC instructions require the shared-everything
+;; feature.
+
+;; RUN: not wasm-opt -all --disable-shared-everything %s 2>&1 | filecheck %s
+
+(module
+ (type $struct (struct (field (mut i32))))
+
+ ;; CHECK: struct.atomic.get requires shared-everything [--enable-shared-everything]
+ (func $get-seqcst (result i32)
+ (struct.atomic.get seqcst $struct 0
+ (struct.new_default $struct)
+ )
+ )
+
+ ;; CHECK: struct.atomic.get requires shared-everything [--enable-shared-everything]
+ (func $get-acqrel (result i32)
+ (struct.atomic.get acqrel $struct 0
+ (struct.new_default $struct)
+ )
+ )
+
+ ;; CHECK: struct.atomic.set requires shared-everything [--enable-shared-everything]
+ (func $set-seqcst
+ (struct.atomic.set seqcst $struct 0
+ (struct.new_default $struct)
+ (i32.const 0)
+ )
+ )
+
+ ;; CHECK: struct.atomic.set requires shared-everything [--enable-shared-everything]
+ (func $set-acqrel
+ (struct.atomic.set acqrel $struct 0
+ (struct.new_default $struct)
+ (i32.const 0)
+ )
+ )
+)