author | Thomas Lively <tlively@google.com> | 2022-10-20 14:24:11 -0500
---|---|---
committer | GitHub <noreply@github.com> | 2022-10-20 12:24:11 -0700
commit | 7144c922f1cd19a4c55c603c7f57f224e9ec4975 (patch) |
tree | 8cd37df44eefbb492a528f50cfdb1afc4b5a2a5a /src/wasm/wat-parser.cpp |
parent | aeed16c568700a5340138cc4c43e5818e8a413fb (diff) |
download | binaryen-7144c922f1cd19a4c55c603c7f57f224e9ec4975.tar.gz, binaryen-7144c922f1cd19a4c55c603c7f57f224e9ec4975.tar.bz2, binaryen-7144c922f1cd19a4c55c603c7f57f224e9ec4975.zip |
[NFC] Avoid re-parsing instruction names (#5171)
Since gen-s-parser.py is essentially a giant table mapping instruction names to
the information necessary to construct the corresponding IR nodes, there should
be no need to parse instruction names any further once the code generated by
gen-s-parser.py has run. However, memory instruction parsing still re-parsed
instruction names to recover details such as access size and default alignment.
The new parser does not have the ability to parse that information out of
instruction names, so put it in the gen-s-parser.py table instead.
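As a concrete illustration, here is a minimal sketch (not the actual generated code) of the kind of dispatch gen-s-parser.py can emit once the table carries these details directly: every call site spells out the access width, signedness, and atomicity, matching the updated `makeLoad`/`makeStore`/`makeAtomicRMW` signatures in the diff below. The wrapper `makeMemoryInstr` and the flat string comparisons are invented for this sketch; the real generated code matches names character by character, and `RMWAdd` is assumed to be Binaryen's `AtomicRMWOp` enumerator for atomic add.

```cpp
// Hypothetical table-driven dispatch (fragment; assumes the declarations
// changed in this patch plus <string_view>). Every memory-operand detail is
// baked into the call site, so the callee never re-derives it from the name.
// Default alignment needs no extra field: it equals the access width, so
// passing `bytes` covers it.
template<typename Ctx>
Result<typename Ctx::InstrT>
makeMemoryInstr(Ctx& ctx, Index pos, std::string_view op) {
  if (op == "i32.load") {
    return makeLoad(
      ctx, pos, Type::i32, /*signed_=*/false, /*bytes=*/4, /*isAtomic=*/false);
  }
  if (op == "i32.load8_s") {
    return makeLoad(
      ctx, pos, Type::i32, /*signed_=*/true, /*bytes=*/1, /*isAtomic=*/false);
  }
  if (op == "i64.store16") {
    return makeStore(ctx, pos, Type::i64, /*bytes=*/2, /*isAtomic=*/false);
  }
  if (op == "i32.atomic.rmw.add") {
    return makeAtomicRMW(ctx, pos, RMWAdd, Type::i32, /*bytes=*/4);
  }
  return ctx.in.err("unrecognized instruction");
}
```

In the generated parser the lookup is a trie of nested switches rather than string comparisons, but the call sites have this shape: size and signedness travel through the table, not through the instruction name.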
Diffstat (limited to 'src/wasm/wat-parser.cpp')
-rw-r--r-- | src/wasm/wat-parser.cpp | 30 |
1 file changed, 12 insertions, 18 deletions
```diff
diff --git a/src/wasm/wat-parser.cpp b/src/wasm/wat-parser.cpp
index 8d85e4f45..6bfca9da6 100644
--- a/src/wasm/wat-parser.cpp
+++ b/src/wasm/wat-parser.cpp
@@ -1555,17 +1555,17 @@ template<typename Ctx> Result<typename Ctx::InstrT> makeThenOrElse(Ctx&, Index);
 template<typename Ctx>
 Result<typename Ctx::InstrT> makeConst(Ctx&, Index, Type type);
 template<typename Ctx>
-Result<typename Ctx::InstrT> makeLoad(Ctx&, Index, Type type, bool isAtomic);
-template<typename Ctx>
-Result<typename Ctx::InstrT> makeStore(Ctx&, Index, Type type, bool isAtomic);
+Result<typename Ctx::InstrT>
+makeLoad(Ctx&, Index, Type type, bool signed_, int bytes, bool isAtomic);
 template<typename Ctx>
-Result<typename Ctx::InstrT> makeAtomicRMWOrCmpxchg(Ctx&, Index, Type type);
+Result<typename Ctx::InstrT>
+makeStore(Ctx&, Index, Type type, int bytes, bool isAtomic);
 template<typename Ctx>
 Result<typename Ctx::InstrT>
-makeAtomicRMW(Ctx&, Index, Type type, uint8_t bytes, const char* extra);
+makeAtomicRMW(Ctx&, Index, AtomicRMWOp op, Type type, uint8_t bytes);
 template<typename Ctx>
 Result<typename Ctx::InstrT>
-makeAtomicCmpxchg(Ctx&, Index, Type type, uint8_t bytes, const char* extra);
+makeAtomicCmpxchg(Ctx&, Index, Type type, uint8_t bytes);
 template<typename Ctx>
 Result<typename Ctx::InstrT> makeAtomicWait(Ctx&, Index, Type type);
 template<typename Ctx>
@@ -2244,32 +2244,26 @@ Result<typename Ctx::InstrT> makeConst(Ctx& ctx, Index pos, Type type) {
 }
 
 template<typename Ctx>
-Result<typename Ctx::InstrT>
-makeLoad(Ctx& ctx, Index pos, Type type, bool isAtomic) {
+Result<typename Ctx::InstrT> makeLoad(
+  Ctx& ctx, Index pos, Type type, bool signed_, int bytes, bool isAtomic) {
   return ctx.in.err("unimplemented instruction");
 }
 
 template<typename Ctx>
 Result<typename Ctx::InstrT>
-makeStore(Ctx& ctx, Index pos, Type type, bool isAtomic) {
+makeStore(Ctx& ctx, Index pos, Type type, int bytes, bool isAtomic) {
   return ctx.in.err("unimplemented instruction");
 }
 
 template<typename Ctx>
 Result<typename Ctx::InstrT>
-makeAtomicRMWOrCmpxchg(Ctx& ctx, Index pos, Type type) {
+makeAtomicRMW(Ctx& ctx, Index pos, AtomicRMWOp op, Type type, uint8_t bytes) {
   return ctx.in.err("unimplemented instruction");
 }
 
 template<typename Ctx>
-Result<typename Ctx::InstrT> makeAtomicRMW(
-  Ctx& ctx, Index pos, Type type, uint8_t bytes, const char* extra) {
-  return ctx.in.err("unimplemented instruction");
-}
-
-template<typename Ctx>
-Result<typename Ctx::InstrT> makeAtomicCmpxchg(
-  Ctx& ctx, Index pos, Type type, uint8_t bytes, const char* extra) {
+Result<typename Ctx::InstrT>
+makeAtomicCmpxchg(Ctx& ctx, Index pos, Type type, uint8_t bytes) {
   return ctx.in.err("unimplemented instruction");
 }
 
```
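For contrast, this is roughly the work the old `makeLoad(Ctx&, Index, Type type, bool isAtomic)` shape left to the callee: with only the value type and atomicity in hand, the access width and signedness had to be re-parsed out of names like `i32.load8_s`. The standalone helper below is illustrative only; `MemArgInfo` and `parseMemArg` are invented for this sketch and do not appear in wat-parser.cpp.

```cpp
#include <cassert>
#include <string_view>

// Width and signedness that a load/store name may encode, e.g. "i32.load8_s".
struct MemArgInfo {
  int bytes;    // access width in bytes
  bool signed_; // sign-extend a sub-width load?
};

// Re-derive the memory-operand details from an instruction name, given the
// full width of the value type as a fallback (4 for i32, 8 for i64).
inline MemArgInfo parseMemArg(std::string_view name, int fullBytes) {
  MemArgInfo info{fullBytes, false};
  // Look only at the part after the first '.', e.g. "load8_s" in
  // "i32.load8_s", so the "32" in the type prefix is not read as a width.
  std::string_view op = name.substr(name.find('.') + 1);
  size_t pos = op.find_first_of("0123456789");
  if (pos != std::string_view::npos) {
    int bits = 0;
    while (pos < op.size() && op[pos] >= '0' && op[pos] <= '9') {
      bits = bits * 10 + (op[pos++] - '0');
    }
    info.bytes = bits / 8;
    info.signed_ = op.size() >= 2 && op.substr(op.size() - 2) == "_s";
  }
  return info;
}

int main() {
  assert(parseMemArg("i32.load", 4).bytes == 4);    // full width, no suffix
  assert(parseMemArg("i32.load8_s", 4).bytes == 1); // explicit 8-bit width
  assert(parseMemArg("i32.load8_s", 4).signed_);    // "_s" suffix
  assert(parseMemArg("i64.store32", 8).bytes == 4); // stores carry no sign
  return 0;
}
```

After this patch that kind of logic lives in the gen-s-parser.py table instead, so the `make*` functions simply receive `bytes`, `signed_`, and the `AtomicRMWOp` as arguments.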