about summary refs log tree commit diff
path: root/src/tokenizer.cpp
diff options
context:
space:
mode:
author	Andrew Kelley <andrew@ziglang.org>	2019-10-06 16:39:27 -0400
committer	Andrew Kelley <andrew@ziglang.org>	2019-10-06 16:39:27 -0400
commit	8e2c441b2eae7af27fe311c4539f6932a7f47cea (patch)
tree	9bb0bd70760ab320431ada61169520689a65a4c1 /src/tokenizer.cpp
parent	86171afb9b7d7f1c1d5444f894e273ca40e87147 (diff)
download	zig-8e2c441b2eae7af27fe311c4539f6932a7f47cea.tar.gz
zig-8e2c441b2eae7af27fe311c4539f6932a7f47cea.zip
stage1 parser supports doc comments
Diffstat (limited to 'src/tokenizer.cpp')
-rw-r--r--	src/tokenizer.cpp	53
1 file changed, 51 insertions(+), 2 deletions(-)
diff --git a/src/tokenizer.cpp b/src/tokenizer.cpp
index 71a24fe726..399597b7bc 100644
--- a/src/tokenizer.cpp
+++ b/src/tokenizer.cpp
@@ -196,6 +196,8 @@ enum TokenizeState {
TokenizeStateSawStar,
TokenizeStateSawStarPercent,
TokenizeStateSawSlash,
+ TokenizeStateSawSlash2,
+ TokenizeStateSawSlash3,
TokenizeStateSawBackslash,
TokenizeStateSawPercent,
TokenizeStateSawPlus,
@@ -206,6 +208,7 @@ enum TokenizeState {
TokenizeStateSawCaret,
TokenizeStateSawBar,
TokenizeStateSawBarBar,
+ TokenizeStateDocComment,
TokenizeStateLineComment,
TokenizeStateLineString,
TokenizeStateLineStringEnd,
@@ -910,8 +913,7 @@ void tokenize(Buf *buf, Tokenization *out) {
case TokenizeStateSawSlash:
switch (c) {
case '/':
- cancel_token(&t);
- t.state = TokenizeStateLineComment;
+ t.state = TokenizeStateSawSlash2;
break;
case '=':
set_token_id(&t, t.cur_tok, TokenIdDivEq);
@@ -925,6 +927,38 @@ void tokenize(Buf *buf, Tokenization *out) {
continue;
}
break;
+ case TokenizeStateSawSlash2:
+ switch (c) {
+ case '/':
+ t.state = TokenizeStateSawSlash3;
+ break;
+ case '\n':
+ cancel_token(&t);
+ t.state = TokenizeStateStart;
+ break;
+ default:
+ cancel_token(&t);
+ t.state = TokenizeStateLineComment;
+ break;
+ }
+ break;
+ case TokenizeStateSawSlash3:
+ switch (c) {
+ case '/':
+ cancel_token(&t);
+ t.state = TokenizeStateLineComment;
+ break;
+ case '\n':
+ set_token_id(&t, t.cur_tok, TokenIdDocComment);
+ end_token(&t);
+ t.state = TokenizeStateStart;
+ break;
+ default:
+ set_token_id(&t, t.cur_tok, TokenIdDocComment);
+ t.state = TokenizeStateDocComment;
+ break;
+ }
+ break;
case TokenizeStateSawBackslash:
switch (c) {
case '\\':
@@ -1004,6 +1038,17 @@ void tokenize(Buf *buf, Tokenization *out) {
break;
}
break;
+ case TokenizeStateDocComment:
+ switch (c) {
+ case '\n':
+ end_token(&t);
+ t.state = TokenizeStateStart;
+ break;
+ default:
+ // do nothing
+ break;
+ }
+ break;
case TokenizeStateSymbolFirstC:
switch (c) {
case '"':
@@ -1466,6 +1511,7 @@ void tokenize(Buf *buf, Tokenization *out) {
case TokenizeStateLineStringEnd:
case TokenizeStateSawBarBar:
case TokenizeStateLBracket:
+ case TokenizeStateDocComment:
end_token(&t);
break;
case TokenizeStateSawDotDot:
@@ -1478,6 +1524,8 @@ void tokenize(Buf *buf, Tokenization *out) {
tokenize_error(&t, "unexpected EOF");
break;
case TokenizeStateLineComment:
+ case TokenizeStateSawSlash2:
+ case TokenizeStateSawSlash3:
break;
}
if (t.state != TokenizeStateError) {
@@ -1524,6 +1572,7 @@ const char * token_name(TokenId id) {
case TokenIdComma: return ",";
case TokenIdDash: return "-";
case TokenIdDivEq: return "/=";
+ case TokenIdDocComment: return "DocComment";
case TokenIdDot: return ".";
case TokenIdEllipsis2: return "..";
case TokenIdEllipsis3: return "...";