Diffstat (limited to 'src/passes')
-rw-r--r--  src/passes/OptimizeInstructions.cpp | 13
-rw-r--r--  src/passes/Precompute.cpp           | 46
-rw-r--r--  src/passes/Print.cpp                | 31
3 files changed, 70 insertions, 20 deletions
diff --git a/src/passes/OptimizeInstructions.cpp b/src/passes/OptimizeInstructions.cpp
index 792cf6235..6a528d74f 100644
--- a/src/passes/OptimizeInstructions.cpp
+++ b/src/passes/OptimizeInstructions.cpp
@@ -1831,6 +1831,12 @@ struct OptimizeInstructions
   void visitStructGet(StructGet* curr) {
     skipNonNullCast(curr->ref, curr);
     trapOnNull(curr, curr->ref);
+    // Relax acquire loads of unshared fields to unordered because they cannot
+    // synchronize with other threads.
+    if (curr->order == MemoryOrder::AcqRel && curr->ref->type.isRef() &&
+        !curr->ref->type.getHeapType().isShared()) {
+      curr->order = MemoryOrder::Unordered;
+    }
   }
 
   void visitStructSet(StructSet* curr) {
@@ -1847,6 +1853,13 @@ struct OptimizeInstructions
         optimizeStoredValue(curr->value, fields[curr->index].getByteSize());
       }
     }
+
+    // Relax release stores of unshared fields to unordered because they cannot
+    // synchronize with other threads.
+    if (curr->order == MemoryOrder::AcqRel && curr->ref->type.isRef() &&
+        !curr->ref->type.getHeapType().isShared()) {
+      curr->order = MemoryOrder::Unordered;
+    }
   }
 
   void visitArrayNew(ArrayNew* curr) {
diff --git a/src/passes/Precompute.cpp b/src/passes/Precompute.cpp
index 0fc0753ae..93f2f1d69 100644
--- a/src/passes/Precompute.cpp
+++ b/src/passes/Precompute.cpp
@@ -134,23 +134,37 @@ public:
   }
   Flow visitStructSet(StructSet* curr) { return Flow(NONCONSTANT_FLOW); }
   Flow visitStructGet(StructGet* curr) {
-    if (curr->ref->type != Type::unreachable && !curr->ref->type.isNull()) {
-      // If this field is immutable then we may be able to precompute this, as
-      // if we also created the data in this function (or it was created in an
-      // immutable global) then we know the value in the field. If it is
-      // immutable, call the super method which will do the rest here. That
-      // includes checking for the data being properly created, as if it was
-      // not then we will not have a constant value for it, which means the
-      // local.get of that value will stop us.
-      auto& field =
-        curr->ref->type.getHeapType().getStruct().fields[curr->index];
-      if (field.mutable_ == Immutable) {
-        return Super::visitStructGet(curr);
-      }
+    if (curr->ref->type == Type::unreachable || curr->ref->type.isNull()) {
+      return Flow(NONCONSTANT_FLOW);
     }
-
-    // Otherwise, we've failed to precompute.
-    return Flow(NONCONSTANT_FLOW);
+    switch (curr->order) {
+      case MemoryOrder::Unordered:
+        // This can always be precomputed.
+        break;
+      case MemoryOrder::SeqCst:
+        // This can never be precomputed away because it synchronizes with other
+        // threads.
+        return Flow(NONCONSTANT_FLOW);
+      case MemoryOrder::AcqRel:
+        // This synchronizes only with writes to the same data, so it can still
+        // be precomputed if the data is not shared with other threads.
+        if (curr->ref->type.getHeapType().isShared()) {
+          return Flow(NONCONSTANT_FLOW);
+        }
+        break;
+    }
+    // If this field is immutable then we may be able to precompute this, as
+    // if we also created the data in this function (or it was created in an
+    // immutable global) then we know the value in the field. If it is
+    // immutable, call the super method which will do the rest here. That
+    // includes checking for the data being properly created, as if it was
+    // not then we will not have a constant value for it, which means the
+    // local.get of that value will stop us.
+    auto& field = curr->ref->type.getHeapType().getStruct().fields[curr->index];
+    if (field.mutable_ == Mutable) {
+      return Flow(NONCONSTANT_FLOW);
+    }
+    return Super::visitStructGet(curr);
   }
   Flow visitArrayNew(ArrayNew* curr) {
     auto flow = Super::visitArrayNew(curr);
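For illustration, a sketch in WebAssembly text of what the two changes above enable; $t, $f, and $ref are hypothetical names (not from this commit), with $t an unshared struct type whose field $f is immutable, assuming such atomic accesses validate:

  ;; OptimizeInstructions: an acquire load of an unshared struct cannot
  ;; synchronize with other threads, so it is relaxed to a plain get.
  (struct.atomic.get acqrel $t $f (local.get $ref))
  ;; => (struct.get $t $f (local.get $ref))

  ;; Precompute: an acqrel get of unshared data is now treated like an
  ;; unordered one, so a known immutable field may be folded away; a
  ;; seqcst get is still never precomputed.
  (struct.atomic.get acqrel $t $f (struct.new $t (i32.const 7)))
  ;; => (i32.const 7)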
diff --git a/src/passes/Print.cpp b/src/passes/Print.cpp
index 5f2d1cc3d..d70034c85 100644
--- a/src/passes/Print.cpp
+++ b/src/passes/Print.cpp
@@ -2276,24 +2276,47 @@ struct PrintExpressionContents
       o << index;
     }
   }
+  void printMemoryOrder(MemoryOrder order) {
+    switch (order) {
+      // Unordered should have a different base instruction, so there is nothing
+      // to print. We could be explicit and print seqcst, but we choose not to
+      // for more concise output.
+      case MemoryOrder::Unordered:
+      case MemoryOrder::SeqCst:
+        break;
+      case MemoryOrder::AcqRel:
+        o << "acqrel ";
+        break;
+    }
+  }
   void visitStructGet(StructGet* curr) {
     auto heapType = curr->ref->type.getHeapType();
     const auto& field = heapType.getStruct().fields[curr->index];
+    printMedium(o, "struct");
+    if (curr->order != MemoryOrder::Unordered) {
+      printMedium(o, ".atomic");
+    }
     if (field.type == Type::i32 && field.packedType != Field::not_packed) {
       if (curr->signed_) {
-        printMedium(o, "struct.get_s ");
+        printMedium(o, ".get_s ");
       } else {
-        printMedium(o, "struct.get_u ");
+        printMedium(o, ".get_u ");
       }
     } else {
-      printMedium(o, "struct.get ");
+      printMedium(o, ".get ");
     }
+    printMemoryOrder(curr->order);
     printHeapType(heapType);
     o << ' ';
     printFieldName(heapType, curr->index);
   }
   void visitStructSet(StructSet* curr) {
-    printMedium(o, "struct.set ");
+    if (curr->order == MemoryOrder::Unordered) {
+      printMedium(o, "struct.set ");
+    } else {
+      printMedium(o, "struct.atomic.set ");
+    }
+    printMemoryOrder(curr->order);
     auto heapType = curr->ref->type.getHeapType();
     printHeapType(heapType);
     o << ' ';
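For reference, the surface syntax the Print.cpp change above emits, again with hypothetical $t and $f; since the printer elides seqcst for concision, a bare atomic form denotes sequential consistency:

  struct.get $t $f                 ;; unordered, printed as before
  struct.atomic.get $t $f          ;; seqcst, order elided
  struct.atomic.get acqrel $t $f   ;; acquire load
  struct.atomic.set acqrel $t $f   ;; release store
  struct.atomic.get_s $t $f        ;; packed fields keep the _s/_u suffixes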