diff options
| author | Andrew Kelley <andrew@ziglang.org> | 2021-02-25 21:04:23 -0700 |
|---|---|---|
| committer | Andrew Kelley <andrew@ziglang.org> | 2021-02-25 21:04:23 -0700 |
| commit | 0b58b617998b79a765b54f88fbe90ca2798b3d3e (patch) | |
| tree | ca6cc4b6bcc2b93166d196049ee49416afe781ad /src/stage1/tokenizer.cpp | |
| parent | dc325669e360f7a9dfa24f85a62fa386529dade6 (diff) | |
| parent | fd208d9d5913a0929e444deb97b91092c427bb14 (diff) | |
| download | zig-0b58b617998b79a765b54f88fbe90ca2798b3d3e.tar.gz zig-0b58b617998b79a765b54f88fbe90ca2798b3d3e.zip | |
Merge remote-tracking branch 'origin/master' into llvm12
Conflicts:
* src/clang.zig
* src/llvm.zig
- this file got moved to src/llvm/bindings.zig in master branch so I
had to put the new LLVM arch/os enum tags into it.
* lib/std/target.zig, src/stage1/target.cpp
- haiku had an inconsistency with its default target ABI, gnu vs
eabi. In this commit we make it gnu in both places to match the
latest changes by @hoanga.
* src/translate_c.zig
Diffstat (limited to 'src/stage1/tokenizer.cpp')
| -rw-r--r-- | src/stage1/tokenizer.cpp | 20 |
1 file changed, 2 insertions, 18 deletions
diff --git a/src/stage1/tokenizer.cpp b/src/stage1/tokenizer.cpp
index 09e87d23be..623169a313 100644
--- a/src/stage1/tokenizer.cpp
+++ b/src/stage1/tokenizer.cpp
@@ -208,7 +208,6 @@ enum TokenizeState {
     TokenizeStateSawAmpersand,
     TokenizeStateSawCaret,
     TokenizeStateSawBar,
-    TokenizeStateSawBarBar,
     TokenizeStateDocComment,
     TokenizeStateContainerDocComment,
     TokenizeStateLineComment,
@@ -833,19 +832,6 @@ void tokenize(Buf *buf, Tokenization *out) {
                     break;
                 case '|':
                     set_token_id(&t, t.cur_tok, TokenIdBarBar);
-                    t.state = TokenizeStateSawBarBar;
-                    break;
-                default:
-                    t.pos -= 1;
-                    end_token(&t);
-                    t.state = TokenizeStateStart;
-                    continue;
-            }
-            break;
-        case TokenizeStateSawBarBar:
-            switch (c) {
-                case '=':
-                    set_token_id(&t, t.cur_tok, TokenIdBarBarEq);
                     end_token(&t);
                     t.state = TokenizeStateStart;
                     break;
@@ -1461,7 +1447,7 @@ void tokenize(Buf *buf, Tokenization *out) {
                 tokenize_error(&t, "unterminated string");
                 break;
             } else if (t.cur_tok->id == TokenIdCharLiteral) {
-                tokenize_error(&t, "unterminated character literal");
+                tokenize_error(&t, "unterminated Unicode code point literal");
                 break;
             } else {
                 zig_unreachable();
@@ -1470,7 +1456,7 @@ void tokenize(Buf *buf, Tokenization *out) {
         case TokenizeStateCharLiteral:
         case TokenizeStateCharLiteralEnd:
         case TokenizeStateCharLiteralUnicode:
-            tokenize_error(&t, "unterminated character literal");
+            tokenize_error(&t, "unterminated Unicode code point literal");
             break;
         case TokenizeStateSymbol:
         case TokenizeStateZero:
@@ -1500,7 +1486,6 @@ void tokenize(Buf *buf, Tokenization *out) {
         case TokenizeStateSawMinusPercent:
         case TokenizeStateLineString:
         case TokenizeStateLineStringEnd:
-        case TokenizeStateSawBarBar:
         case TokenizeStateDocComment:
         case TokenizeStateContainerDocComment:
             end_token(&t);
@@ -1659,7 +1644,6 @@ const char * token_name(TokenId id) {
         case TokenIdTimesEq: return "*=";
         case TokenIdTimesPercent: return "*%";
         case TokenIdTimesPercentEq: return "*%=";
-        case TokenIdBarBarEq: return "||=";
         case TokenIdCount:
             zig_unreachable();
     }
