1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // Package parser implements a parser for Go source files. Input may be
6 // provided in a variety of forms (see the various Parse* functions); the
7 // output is an abstract syntax tree (AST) representing the Go source. The
8 // parser is invoked through one of the Parse* functions.
10 // The parser accepts a larger language than is syntactically permitted by
11 // the Go spec, for simplicity, and for improved robustness in the presence
12 // of syntax errors. For instance, in method declarations, the receiver is
13 // treated like an ordinary parameter list and thus may contain multiple
// entries where the spec permits exactly one. Consequently, the corresponding
// field in the AST (ast.FuncDecl.Recv) is not restricted to one entry.
22 "go/internal/typeparams"
28 // The parser structure holds the parser's internal state.
31 errors scanner.ErrorList
32 scanner scanner.Scanner
35 mode Mode // parsing mode
36 trace bool // == (mode&Trace != 0)
37 indent int // indentation used for tracing output
40 comments []*ast.CommentGroup
41 leadComment *ast.CommentGroup // last lead comment
42 lineComment *ast.CommentGroup // last line comment
43 top bool // in top of file (before package clause)
44 goVersion string // minimum Go version found in //go:build comment
47 pos token.Pos // token position
48 tok token.Token // one token look-ahead
49 lit string // token literal
52 // (used to limit the number of calls to parser.advance
53 // w/o making scanning progress - avoids potential endless
54 // loops across multiple parser functions during error recovery)
55 syncPos token.Pos // last synchronization position
56 syncCnt int // number of parser.advance calls without progress
58 // Non-syntactic parser control
59 exprLev int // < 0: in control clause, >= 0: in expression
60 inRhs bool // if set, the parser is parsing a rhs expression
62 imports []*ast.ImportSpec // list of imports
64 // nestLev is used to track and limit the recursion depth
69 func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode Mode) {
70 p.file = fset.AddFile(filename, -1, len(src))
71 eh := func(pos token.Position, msg string) { p.errors.Add(pos, msg) }
72 p.scanner.Init(p.file, src, eh, scanner.ScanComments)
76 p.trace = mode&Trace != 0 // for convenience (p.trace is used frequently)
80 // ----------------------------------------------------------------------------
83 func (p *parser) printTrace(a ...any) {
84 const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
86 pos := p.file.Position(p.pos)
87 fmt.Printf("%5d:%3d: ", pos.Line, pos.Column)
98 func trace(p *parser, msg string) *parser {
99 p.printTrace(msg, "(")
104 // Usage pattern: defer un(trace(p, "..."))
// maxNestLev is the deepest we're willing to recurse during parsing
// (guards against stack exhaustion on pathologically nested input;
// enforced by incNestLev/decNestLev).
const maxNestLev int = 1e5
113 func incNestLev(p *parser) *parser {
115 if p.nestLev > maxNestLev {
116 p.error(p.pos, "exceeded max nesting depth")
122 // decNestLev is used to track nesting depth during parsing to prevent stack exhaustion.
123 // It is used along with incNestLev in a similar fashion to how un and trace are used.
124 func decNestLev(p *parser) {
128 // Advance to the next token.
129 func (p *parser) next0() {
130 // Because of one-token look-ahead, print the previous token
131 // when tracing as it provides a more readable output. The
132 // very first token (!p.pos.IsValid()) is not initialized
133 // (it is token.ILLEGAL), so don't print it.
134 if p.trace && p.pos.IsValid() {
137 case p.tok.IsLiteral():
138 p.printTrace(s, p.lit)
139 case p.tok.IsOperator(), p.tok.IsKeyword():
140 p.printTrace("\"" + s + "\"")
147 p.pos, p.tok, p.lit = p.scanner.Scan()
148 if p.tok == token.COMMENT {
149 if p.top && strings.HasPrefix(p.lit, "//go:build") {
150 if x, err := constraint.Parse(p.lit); err == nil {
151 p.goVersion = constraint.GoVersion(x)
154 if p.mode&ParseComments == 0 {
158 // Found a non-comment; top of file is over.
165 // Consume a comment and return it and the line on which it ends.
166 func (p *parser) consumeComment() (comment *ast.Comment, endline int) {
167 // /*-style comments may end on a different line than where they start.
168 // Scan the comment for '\n' chars and adjust endline accordingly.
169 endline = p.file.Line(p.pos)
171 // don't use range here - no need to decode Unicode code points
172 for i := 0; i < len(p.lit); i++ {
173 if p.lit[i] == '\n' {
179 comment = &ast.Comment{Slash: p.pos, Text: p.lit}
185 // Consume a group of adjacent comments, add it to the parser's
186 // comments list, and return it together with the line at which
187 // the last comment in the group ends. A non-comment token or n
188 // empty lines terminate a comment group.
189 func (p *parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
190 var list []*ast.Comment
191 endline = p.file.Line(p.pos)
192 for p.tok == token.COMMENT && p.file.Line(p.pos) <= endline+n {
193 var comment *ast.Comment
194 comment, endline = p.consumeComment()
195 list = append(list, comment)
198 // add comment group to the comments list
199 comments = &ast.CommentGroup{List: list}
200 p.comments = append(p.comments, comments)
205 // Advance to the next non-comment token. In the process, collect
206 // any comment groups encountered, and remember the last lead and
209 // A lead comment is a comment group that starts and ends in a
210 // line without any other tokens and that is followed by a non-comment
211 // token on the line immediately after the comment group.
213 // A line comment is a comment group that follows a non-comment
214 // token on the same line, and that has no tokens after it on the line
217 // Lead and line comments may be considered documentation that is
218 // stored in the AST.
219 func (p *parser) next() {
225 if p.tok == token.COMMENT {
226 var comment *ast.CommentGroup
229 if p.file.Line(p.pos) == p.file.Line(prev) {
230 // The comment is on same line as the previous token; it
231 // cannot be a lead comment but may be a line comment.
232 comment, endline = p.consumeCommentGroup(0)
233 if p.file.Line(p.pos) != endline || p.tok == token.SEMICOLON || p.tok == token.EOF {
234 // The next token is on a different line, thus
235 // the last comment group is a line comment.
236 p.lineComment = comment
240 // consume successor comments, if any
242 for p.tok == token.COMMENT {
243 comment, endline = p.consumeCommentGroup(1)
246 if endline+1 == p.file.Line(p.pos) {
247 // The next token is following on the line immediately after the
248 // comment group, thus the last comment group is a lead comment.
249 p.leadComment = comment
254 // A bailout panic is raised to indicate early termination. pos and msg are
255 // only populated when bailing out of object resolution.
256 type bailout struct {
261 func (p *parser) error(pos token.Pos, msg string) {
263 defer un(trace(p, "error: "+msg))
266 epos := p.file.Position(pos)
268 // If AllErrors is not set, discard errors reported on the same line
269 // as the last recorded error and stop parsing if there are more than
271 if p.mode&AllErrors == 0 {
273 if n > 0 && p.errors[n-1].Pos.Line == epos.Line {
274 return // discard - likely a spurious error
281 p.errors.Add(epos, msg)
284 func (p *parser) errorExpected(pos token.Pos, msg string) {
285 msg = "expected " + msg
287 // the error happened at the current position;
288 // make the error message more specific
290 case p.tok == token.SEMICOLON && p.lit == "\n":
291 msg += ", found newline"
292 case p.tok.IsLiteral():
293 // print 123 rather than 'INT', etc.
294 msg += ", found " + p.lit
296 msg += ", found '" + p.tok.String() + "'"
302 func (p *parser) expect(tok token.Token) token.Pos {
305 p.errorExpected(pos, "'"+tok.String()+"'")
307 p.next() // make progress
311 // expect2 is like expect, but it returns an invalid position
312 // if the expected token is not found.
313 func (p *parser) expect2(tok token.Token) (pos token.Pos) {
317 p.errorExpected(p.pos, "'"+tok.String()+"'")
319 p.next() // make progress
323 // expectClosing is like expect but provides a better error message
324 // for the common case of a missing comma before a newline.
325 func (p *parser) expectClosing(tok token.Token, context string) token.Pos {
326 if p.tok != tok && p.tok == token.SEMICOLON && p.lit == "\n" {
327 p.error(p.pos, "missing ',' before newline in "+context)
333 // expectSemi consumes a semicolon and returns the applicable line comment.
334 func (p *parser) expectSemi() (comment *ast.CommentGroup) {
335 // semicolon is optional before a closing ')' or '}'
336 if p.tok != token.RPAREN && p.tok != token.RBRACE {
339 // permit a ',' instead of a ';' but complain
340 p.errorExpected(p.pos, "';'")
342 case token.SEMICOLON:
344 // explicit semicolon
346 comment = p.lineComment // use following comments
348 // artificial semicolon
349 comment = p.lineComment // use preceding comments
354 p.errorExpected(p.pos, "';'")
361 func (p *parser) atComma(context string, follow token.Token) bool {
362 if p.tok == token.COMMA {
367 if p.tok == token.SEMICOLON && p.lit == "\n" {
368 msg += " before newline"
370 p.error(p.pos, msg+" in "+context)
371 return true // "insert" comma and continue
// assert panics with an internal-error message when cond is false.
// It marks programmer bugs, not user input errors.
func assert(cond bool, msg string) {
	if cond {
		return
	}
	panic("go/parser internal error: " + msg)
}
382 // advance consumes tokens until the current token p.tok
383 // is in the 'to' set, or token.EOF. For error recovery.
384 func (p *parser) advance(to map[token.Token]bool) {
385 for ; p.tok != token.EOF; p.next() {
387 // Return only if parser made some progress since last
388 // sync or if it has not reached 10 advance calls without
389 // progress. Otherwise consume at least one token to
390 // avoid an endless parser loop (it is possible that
391 // both parseOperand and parseStmt call advance and
392 // correctly do not advance, thus the need for the
393 // invocation limit p.syncCnt).
394 if p.pos == p.syncPos && p.syncCnt < 10 {
398 if p.pos > p.syncPos {
403 // Reaching here indicates a parser bug, likely an
404 // incorrect token list in this function, but it only
405 // leads to skipping of possibly correct code if a
406 // previous error is present, and thus is preferred
407 // over a non-terminating parse.
412 var stmtStart = map[token.Token]bool{
415 token.CONTINUE: true,
417 token.FALLTHROUGH: true,
429 var declStart = map[token.Token]bool{
436 var exprEnd = map[token.Token]bool{
439 token.SEMICOLON: true,
445 // safePos returns a valid file position for a given position: If pos
446 // is valid to begin with, safePos returns pos. If pos is out-of-range,
447 // safePos returns the EOF position.
449 // This is hack to work around "artificial" end positions in the AST which
450 // are computed by adding 1 to (presumably valid) token positions. If the
451 // token positions are invalid due to parse errors, the resulting end position
452 // may be past the file's EOF position, which would lead to panics if used
454 func (p *parser) safePos(pos token.Pos) (res token.Pos) {
456 if recover() != nil {
457 res = token.Pos(p.file.Base() + p.file.Size()) // EOF position
460 _ = p.file.Offset(pos) // trigger a panic if position is out-of-range
464 // ----------------------------------------------------------------------------
467 func (p *parser) parseIdent() *ast.Ident {
470 if p.tok == token.IDENT {
474 p.expect(token.IDENT) // use expect() error handling
476 return &ast.Ident{NamePos: pos, Name: name}
479 func (p *parser) parseIdentList() (list []*ast.Ident) {
481 defer un(trace(p, "IdentList"))
484 list = append(list, p.parseIdent())
485 for p.tok == token.COMMA {
487 list = append(list, p.parseIdent())
493 // ----------------------------------------------------------------------------
494 // Common productions
496 // If lhs is set, result list elements which are identifiers are not resolved.
497 func (p *parser) parseExprList() (list []ast.Expr) {
499 defer un(trace(p, "ExpressionList"))
502 list = append(list, p.parseExpr())
503 for p.tok == token.COMMA {
505 list = append(list, p.parseExpr())
511 func (p *parser) parseList(inRhs bool) []ast.Expr {
514 list := p.parseExprList()
519 // ----------------------------------------------------------------------------
522 func (p *parser) parseType() ast.Expr {
524 defer un(trace(p, "Type"))
527 typ := p.tryIdentOrType()
531 p.errorExpected(pos, "type")
533 return &ast.BadExpr{From: pos, To: p.pos}
539 func (p *parser) parseQualifiedIdent(ident *ast.Ident) ast.Expr {
541 defer un(trace(p, "QualifiedIdent"))
544 typ := p.parseTypeName(ident)
545 if p.tok == token.LBRACK {
546 typ = p.parseTypeInstance(typ)
552 // If the result is an identifier, it is not resolved.
553 func (p *parser) parseTypeName(ident *ast.Ident) ast.Expr {
555 defer un(trace(p, "TypeName"))
559 ident = p.parseIdent()
562 if p.tok == token.PERIOD {
563 // ident is a package name
565 sel := p.parseIdent()
566 return &ast.SelectorExpr{X: ident, Sel: sel}
572 // "[" has already been consumed, and lbrack is its position.
573 // If len != nil it is the already consumed array length.
574 func (p *parser) parseArrayType(lbrack token.Pos, len ast.Expr) *ast.ArrayType {
576 defer un(trace(p, "ArrayType"))
581 // always permit ellipsis for more fault-tolerant parsing
582 if p.tok == token.ELLIPSIS {
583 len = &ast.Ellipsis{Ellipsis: p.pos}
585 } else if p.tok != token.RBRACK {
590 if p.tok == token.COMMA {
591 // Trailing commas are accepted in type parameter
592 // lists but not in array type declarations.
593 // Accept for better error handling but complain.
594 p.error(p.pos, "unexpected comma; expecting ]")
597 p.expect(token.RBRACK)
599 return &ast.ArrayType{Lbrack: lbrack, Len: len, Elt: elt}
602 func (p *parser) parseArrayFieldOrTypeInstance(x *ast.Ident) (*ast.Ident, ast.Expr) {
604 defer un(trace(p, "ArrayFieldOrTypeInstance"))
607 lbrack := p.expect(token.LBRACK)
608 trailingComma := token.NoPos // if valid, the position of a trailing comma preceding the ']'
610 if p.tok != token.RBRACK {
612 args = append(args, p.parseRhs())
613 for p.tok == token.COMMA {
616 if p.tok == token.RBRACK {
617 trailingComma = comma
620 args = append(args, p.parseRhs())
624 rbrack := p.expect(token.RBRACK)
629 return x, &ast.ArrayType{Lbrack: lbrack, Elt: elt}
634 elt := p.tryIdentOrType()
637 if trailingComma.IsValid() {
638 // Trailing commas are invalid in array type fields.
639 p.error(trailingComma, "unexpected comma; expecting ]")
641 return x, &ast.ArrayType{Lbrack: lbrack, Len: args[0], Elt: elt}
645 // x[P], x[P1, P2], ...
646 return nil, typeparams.PackIndexExpr(x, lbrack, args, rbrack)
649 func (p *parser) parseFieldDecl() *ast.Field {
651 defer un(trace(p, "FieldDecl"))
656 var names []*ast.Ident
660 name := p.parseIdent()
661 if p.tok == token.PERIOD || p.tok == token.STRING || p.tok == token.SEMICOLON || p.tok == token.RBRACE {
664 if p.tok == token.PERIOD {
665 typ = p.parseQualifiedIdent(name)
668 // name1, name2, ... T
669 names = []*ast.Ident{name}
670 for p.tok == token.COMMA {
672 names = append(names, p.parseIdent())
674 // Careful dance: We don't know if we have an embedded instantiated
675 // type T[P1, P2, ...] or a field T of array type []E or [P]E.
676 if len(names) == 1 && p.tok == token.LBRACK {
677 name, typ = p.parseArrayFieldOrTypeInstance(name)
689 if p.tok == token.LPAREN {
691 p.error(p.pos, "cannot parenthesize embedded type")
693 typ = p.parseQualifiedIdent(nil)
694 // expect closing ')' but no need to complain if missing
695 if p.tok == token.RPAREN {
700 typ = p.parseQualifiedIdent(nil)
702 typ = &ast.StarExpr{Star: star, X: typ}
705 p.error(p.pos, "cannot parenthesize embedded type")
707 if p.tok == token.MUL {
711 typ = &ast.StarExpr{Star: star, X: p.parseQualifiedIdent(nil)}
714 typ = p.parseQualifiedIdent(nil)
716 // expect closing ')' but no need to complain if missing
717 if p.tok == token.RPAREN {
723 p.errorExpected(pos, "field name or embedded type")
725 typ = &ast.BadExpr{From: pos, To: p.pos}
728 var tag *ast.BasicLit
729 if p.tok == token.STRING {
730 tag = &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
734 comment := p.expectSemi()
736 field := &ast.Field{Doc: doc, Names: names, Type: typ, Tag: tag, Comment: comment}
740 func (p *parser) parseStructType() *ast.StructType {
742 defer un(trace(p, "StructType"))
745 pos := p.expect(token.STRUCT)
746 lbrace := p.expect(token.LBRACE)
747 var list []*ast.Field
748 for p.tok == token.IDENT || p.tok == token.MUL || p.tok == token.LPAREN {
749 // a field declaration cannot start with a '(' but we accept
750 // it here for more robust parsing and better error messages
751 // (parseFieldDecl will check and complain if necessary)
752 list = append(list, p.parseFieldDecl())
754 rbrace := p.expect(token.RBRACE)
756 return &ast.StructType{
758 Fields: &ast.FieldList{
766 func (p *parser) parsePointerType() *ast.StarExpr {
768 defer un(trace(p, "PointerType"))
771 star := p.expect(token.MUL)
772 base := p.parseType()
774 return &ast.StarExpr{Star: star, X: base}
777 func (p *parser) parseDotsType() *ast.Ellipsis {
779 defer un(trace(p, "DotsType"))
782 pos := p.expect(token.ELLIPSIS)
785 return &ast.Ellipsis{Ellipsis: pos, Elt: elt}
793 func (p *parser) parseParamDecl(name *ast.Ident, typeSetsOK bool) (f field) {
794 // TODO(rFindley) refactor to be more similar to paramDeclOrNil in the syntax
797 defer un(trace(p, "ParamDeclOrNil"))
802 p.tok = token.IDENT // force token.IDENT case in switch below
803 } else if typeSetsOK && p.tok == token.TILDE {
805 return field{nil, p.embeddedElem(nil)}
815 f.name = p.parseIdent()
818 case token.IDENT, token.MUL, token.ARROW, token.FUNC, token.CHAN, token.MAP, token.STRUCT, token.INTERFACE, token.LPAREN:
820 f.typ = p.parseType()
823 // name "[" type1, ..., typeN "]" or name "[" n "]" type
824 f.name, f.typ = p.parseArrayFieldOrTypeInstance(f.name)
828 f.typ = p.parseDotsType()
829 return // don't allow ...type "|" ...
833 f.typ = p.parseQualifiedIdent(f.name)
838 f.typ = p.embeddedElem(nil)
845 f.typ = p.embeddedElem(f.name)
851 case token.MUL, token.ARROW, token.FUNC, token.LBRACK, token.CHAN, token.MAP, token.STRUCT, token.INTERFACE, token.LPAREN:
853 f.typ = p.parseType()
858 f.typ = p.parseDotsType()
859 return // don't allow ...type "|" ...
862 // TODO(rfindley): this is incorrect in the case of type parameter lists
863 // (should be "']'" in that case)
864 p.errorExpected(p.pos, "')'")
869 if typeSetsOK && p.tok == token.OR && f.typ != nil {
870 f.typ = p.embeddedElem(f.typ)
876 func (p *parser) parseParameterList(name0 *ast.Ident, typ0 ast.Expr, closing token.Token) (params []*ast.Field) {
878 defer un(trace(p, "ParameterList"))
881 // Type parameters are the only parameter list closed by ']'.
882 tparams := closing == token.RBRACK
883 // Type set notation is ok in type parameter lists.
884 typeSetsOK := tparams
892 var named int // number of parameters that have an explicit name and type
894 for name0 != nil || p.tok != closing && p.tok != token.EOF {
898 typ0 = p.embeddedElem(typ0)
900 par = field{name0, typ0}
902 par = p.parseParamDecl(name0, typeSetsOK)
904 name0 = nil // 1st name was consumed if present
905 typ0 = nil // 1st typ was consumed if present
906 if par.name != nil || par.typ != nil {
907 list = append(list, par)
908 if par.name != nil && par.typ != nil {
912 if !p.atComma("parameter list", closing) {
919 return // not uncommon
922 // TODO(gri) parameter distribution and conversion to []*ast.Field
923 // can be combined and made more efficient
925 // distribute parameter types
927 // all unnamed => found names are type names
928 for i := 0; i < len(list); i++ {
930 if typ := par.name; typ != nil {
936 p.error(pos, "type parameters must be named")
938 } else if named != len(list) {
939 // some named => all must be named
943 for i := len(list) - 1; i >= 0; i-- {
944 if par := &list[i]; par.typ != nil {
948 missingName = par.typ.Pos()
949 n := ast.NewIdent("_")
950 n.NamePos = typ.Pos() // correct position
953 } else if typ != nil {
956 // par.typ == nil && typ == nil => we only have a par.name
958 missingName = par.name.Pos()
959 par.typ = &ast.BadExpr{From: par.name.Pos(), To: p.pos}
964 p.error(missingName, "type parameters must be named")
966 p.error(pos, "mixed named and unnamed parameters")
// convert list to []*ast.Field
973 // parameter list consists of types only
974 for _, par := range list {
975 assert(par.typ != nil, "nil type in unnamed parameter list")
976 params = append(params, &ast.Field{Type: par.typ})
981 // parameter list consists of named parameters with types
982 var names []*ast.Ident
984 addParams := func() {
985 assert(typ != nil, "nil type in named parameter list")
986 field := &ast.Field{Names: names, Type: typ}
987 params = append(params, field)
990 for _, par := range list {
997 names = append(names, par.name)
1005 func (p *parser) parseParameters(acceptTParams bool) (tparams, params *ast.FieldList) {
1007 defer un(trace(p, "Parameters"))
1010 if acceptTParams && p.tok == token.LBRACK {
1013 // [T any](params) syntax
1014 list := p.parseParameterList(nil, nil, token.RBRACK)
1015 rbrack := p.expect(token.RBRACK)
1016 tparams = &ast.FieldList{Opening: opening, List: list, Closing: rbrack}
1017 // Type parameter lists must not be empty.
1018 if tparams.NumFields() == 0 {
1019 p.error(tparams.Closing, "empty type parameter list")
1020 tparams = nil // avoid follow-on errors
1024 opening := p.expect(token.LPAREN)
1026 var fields []*ast.Field
1027 if p.tok != token.RPAREN {
1028 fields = p.parseParameterList(nil, nil, token.RPAREN)
1031 rparen := p.expect(token.RPAREN)
1032 params = &ast.FieldList{Opening: opening, List: fields, Closing: rparen}
1037 func (p *parser) parseResult() *ast.FieldList {
1039 defer un(trace(p, "Result"))
1042 if p.tok == token.LPAREN {
1043 _, results := p.parseParameters(false)
1047 typ := p.tryIdentOrType()
1049 list := make([]*ast.Field, 1)
1050 list[0] = &ast.Field{Type: typ}
1051 return &ast.FieldList{List: list}
1057 func (p *parser) parseFuncType() *ast.FuncType {
1059 defer un(trace(p, "FuncType"))
1062 pos := p.expect(token.FUNC)
1063 tparams, params := p.parseParameters(true)
1065 p.error(tparams.Pos(), "function type must have no type parameters")
1067 results := p.parseResult()
1069 return &ast.FuncType{Func: pos, Params: params, Results: results}
1072 func (p *parser) parseMethodSpec() *ast.Field {
1074 defer un(trace(p, "MethodSpec"))
1077 doc := p.leadComment
1078 var idents []*ast.Ident
1080 x := p.parseTypeName(nil)
1081 if ident, _ := x.(*ast.Ident); ident != nil {
1083 case p.tok == token.LBRACK:
1084 // generic method or embedded instantiated type
1090 if name0, _ := x.(*ast.Ident); name0 != nil && p.tok != token.COMMA && p.tok != token.RBRACK {
1091 // generic method m[T any]
1093 // Interface methods do not have type parameters. We parse them for a
1094 // better error message and improved error recovery.
1095 _ = p.parseParameterList(name0, nil, token.RBRACK)
1096 _ = p.expect(token.RBRACK)
1097 p.error(lbrack, "interface method must have no type parameters")
1099 // TODO(rfindley) refactor to share code with parseFuncType.
1100 _, params := p.parseParameters(false)
1101 results := p.parseResult()
1102 idents = []*ast.Ident{ident}
1103 typ = &ast.FuncType{
1109 // embedded instantiated type
1110 // TODO(rfindley) should resolve all identifiers in x.
1111 list := []ast.Expr{x}
1112 if p.atComma("type argument list", token.RBRACK) {
1115 for p.tok != token.RBRACK && p.tok != token.EOF {
1116 list = append(list, p.parseType())
1117 if !p.atComma("type argument list", token.RBRACK) {
1124 rbrack := p.expectClosing(token.RBRACK, "type argument list")
1125 typ = typeparams.PackIndexExpr(ident, lbrack, list, rbrack)
1127 case p.tok == token.LPAREN:
1129 // TODO(rfindley) refactor to share code with parseFuncType.
1130 _, params := p.parseParameters(false)
1131 results := p.parseResult()
1132 idents = []*ast.Ident{ident}
1133 typ = &ast.FuncType{Func: token.NoPos, Params: params, Results: results}
1139 // embedded, possibly instantiated type
1141 if p.tok == token.LBRACK {
1142 // embedded instantiated interface
1143 typ = p.parseTypeInstance(typ)
// Comment is added at the callsite: the field below may be joined with
1148 // additional type specs using '|'.
1149 // TODO(rfindley) this should be refactored.
1150 // TODO(rfindley) add more tests for comment handling.
1151 return &ast.Field{Doc: doc, Names: idents, Type: typ}
1154 func (p *parser) embeddedElem(x ast.Expr) ast.Expr {
1156 defer un(trace(p, "EmbeddedElem"))
1159 x = p.embeddedTerm()
1161 for p.tok == token.OR {
1162 t := new(ast.BinaryExpr)
1167 t.Y = p.embeddedTerm()
1173 func (p *parser) embeddedTerm() ast.Expr {
1175 defer un(trace(p, "EmbeddedTerm"))
1177 if p.tok == token.TILDE {
1178 t := new(ast.UnaryExpr)
1186 t := p.tryIdentOrType()
1189 p.errorExpected(pos, "~ term or type")
1191 return &ast.BadExpr{From: pos, To: p.pos}
1197 func (p *parser) parseInterfaceType() *ast.InterfaceType {
1199 defer un(trace(p, "InterfaceType"))
1202 pos := p.expect(token.INTERFACE)
1203 lbrace := p.expect(token.LBRACE)
1205 var list []*ast.Field
1210 case p.tok == token.IDENT:
1211 f := p.parseMethodSpec()
1213 f.Type = p.embeddedElem(f.Type)
1215 f.Comment = p.expectSemi()
1216 list = append(list, f)
1217 case p.tok == token.TILDE:
1218 typ := p.embeddedElem(nil)
1219 comment := p.expectSemi()
1220 list = append(list, &ast.Field{Type: typ, Comment: comment})
1222 if t := p.tryIdentOrType(); t != nil {
1223 typ := p.embeddedElem(t)
1224 comment := p.expectSemi()
1225 list = append(list, &ast.Field{Type: typ, Comment: comment})
1232 // TODO(rfindley): the error produced here could be improved, since we could
1233 // accept an identifier, 'type', or a '}' at this point.
1234 rbrace := p.expect(token.RBRACE)
1236 return &ast.InterfaceType{
1238 Methods: &ast.FieldList{
1246 func (p *parser) parseMapType() *ast.MapType {
1248 defer un(trace(p, "MapType"))
1251 pos := p.expect(token.MAP)
1252 p.expect(token.LBRACK)
1253 key := p.parseType()
1254 p.expect(token.RBRACK)
1255 value := p.parseType()
1257 return &ast.MapType{Map: pos, Key: key, Value: value}
1260 func (p *parser) parseChanType() *ast.ChanType {
1262 defer un(trace(p, "ChanType"))
1266 dir := ast.SEND | ast.RECV
1268 if p.tok == token.CHAN {
1270 if p.tok == token.ARROW {
1276 arrow = p.expect(token.ARROW)
1277 p.expect(token.CHAN)
1280 value := p.parseType()
1282 return &ast.ChanType{Begin: pos, Arrow: arrow, Dir: dir, Value: value}
1285 func (p *parser) parseTypeInstance(typ ast.Expr) ast.Expr {
1287 defer un(trace(p, "TypeInstance"))
1290 opening := p.expect(token.LBRACK)
1293 for p.tok != token.RBRACK && p.tok != token.EOF {
1294 list = append(list, p.parseType())
1295 if !p.atComma("type argument list", token.RBRACK) {
1302 closing := p.expectClosing(token.RBRACK, "type argument list")
1305 p.errorExpected(closing, "type argument list")
1306 return &ast.IndexExpr{
1309 Index: &ast.BadExpr{From: opening + 1, To: closing},
1314 return typeparams.PackIndexExpr(typ, opening, list, closing)
1317 func (p *parser) tryIdentOrType() ast.Expr {
1318 defer decNestLev(incNestLev(p))
1322 typ := p.parseTypeName(nil)
1323 if p.tok == token.LBRACK {
1324 typ = p.parseTypeInstance(typ)
1328 lbrack := p.expect(token.LBRACK)
1329 return p.parseArrayType(lbrack, nil)
1331 return p.parseStructType()
1333 return p.parsePointerType()
1335 return p.parseFuncType()
1336 case token.INTERFACE:
1337 return p.parseInterfaceType()
1339 return p.parseMapType()
1340 case token.CHAN, token.ARROW:
1341 return p.parseChanType()
1345 typ := p.parseType()
1346 rparen := p.expect(token.RPAREN)
1347 return &ast.ParenExpr{Lparen: lparen, X: typ, Rparen: rparen}
1354 // ----------------------------------------------------------------------------
1357 func (p *parser) parseStmtList() (list []ast.Stmt) {
1359 defer un(trace(p, "StatementList"))
1362 for p.tok != token.CASE && p.tok != token.DEFAULT && p.tok != token.RBRACE && p.tok != token.EOF {
1363 list = append(list, p.parseStmt())
1369 func (p *parser) parseBody() *ast.BlockStmt {
1371 defer un(trace(p, "Body"))
1374 lbrace := p.expect(token.LBRACE)
1375 list := p.parseStmtList()
1376 rbrace := p.expect2(token.RBRACE)
1378 return &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
1381 func (p *parser) parseBlockStmt() *ast.BlockStmt {
1383 defer un(trace(p, "BlockStmt"))
1386 lbrace := p.expect(token.LBRACE)
1387 list := p.parseStmtList()
1388 rbrace := p.expect2(token.RBRACE)
1390 return &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
1393 // ----------------------------------------------------------------------------
1396 func (p *parser) parseFuncTypeOrLit() ast.Expr {
1398 defer un(trace(p, "FuncTypeOrLit"))
1401 typ := p.parseFuncType()
1402 if p.tok != token.LBRACE {
1403 // function type only
1408 body := p.parseBody()
1411 return &ast.FuncLit{Type: typ, Body: body}
1414 // parseOperand may return an expression or a raw type (incl. array
1415 // types of the form [...]T). Callers must verify the result.
1416 func (p *parser) parseOperand() ast.Expr {
1418 defer un(trace(p, "Operand"))
1426 case token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING:
1427 x := &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
1435 x := p.parseRhs() // types may be parenthesized: (some type)
1437 rparen := p.expect(token.RPAREN)
1438 return &ast.ParenExpr{Lparen: lparen, X: x, Rparen: rparen}
1441 return p.parseFuncTypeOrLit()
1444 if typ := p.tryIdentOrType(); typ != nil { // do not consume trailing type parameters
1445 // could be type for composite literal or conversion
1446 _, isIdent := typ.(*ast.Ident)
1447 assert(!isIdent, "type cannot be identifier")
1453 p.errorExpected(pos, "operand")
1454 p.advance(stmtStart)
1455 return &ast.BadExpr{From: pos, To: p.pos}
1458 func (p *parser) parseSelector(x ast.Expr) ast.Expr {
1460 defer un(trace(p, "Selector"))
1463 sel := p.parseIdent()
1465 return &ast.SelectorExpr{X: x, Sel: sel}
1468 func (p *parser) parseTypeAssertion(x ast.Expr) ast.Expr {
1470 defer un(trace(p, "TypeAssertion"))
1473 lparen := p.expect(token.LPAREN)
1475 if p.tok == token.TYPE {
1476 // type switch: typ == nil
1481 rparen := p.expect(token.RPAREN)
1483 return &ast.TypeAssertExpr{X: x, Type: typ, Lparen: lparen, Rparen: rparen}
// parseIndexOrSliceOrInstance parses what follows x once a '[' is seen:
// an index expression x[i], a slice expression x[lo:hi] or x[lo:hi:max],
// or a generic instantiation x[T1, T2, ...]. Which one was parsed is only
// decided after the closing ']' from the colons/commas encountered.
1486 func (p *parser) parseIndexOrSliceOrInstance(x ast.Expr) ast.Expr {
1488 defer un(trace(p, "parseIndexOrSliceOrInstance"))
1491 lbrack := p.expect(token.LBRACK)
1492 if p.tok == token.RBRACK {
1493 // empty index, slice or index expressions are not permitted;
1494 // accept them for parsing tolerance, but complain
1495 p.errorExpected(p.pos, "operand")
1498 return &ast.IndexExpr{
1501 Index: &ast.BadExpr{From: rbrack, To: rbrack},
// N bounds the number of index operands: 3 supports full slice
// expressions a[i:j:k]; colons has one fewer slot than index.
1507 const N = 3 // change the 3 to 2 to disable 3-index slices
1509 var index [N]ast.Expr
1510 var colons [N - 1]token.Pos
1511 if p.tok != token.COLON {
1512 // We can't know if we have an index expression or a type instantiation;
1513 // so even if we see a (named) type we are not going to be in type context.
1514 index[0] = p.parseRhs()
1520 for p.tok == token.COLON && ncolons < len(colons) {
1521 colons[ncolons] = p.pos
1524 if p.tok != token.COLON && p.tok != token.RBRACK && p.tok != token.EOF {
1525 index[ncolons] = p.parseRhs()
1529 // instance expression
1530 args = append(args, index[0])
1531 for p.tok == token.COMMA {
1533 if p.tok != token.RBRACK && p.tok != token.EOF {
1534 args = append(args, p.parseType())
1540 rbrack := p.expect(token.RBRACK)
1547 // Check presence of middle and final index here rather than during type-checking
1548 // to prevent erroneous programs from passing through gofmt (was issue 7305).
1549 if index[1] == nil {
1550 p.error(colons[0], "middle index required in 3-index slice")
1551 index[1] = &ast.BadExpr{From: colons[0] + 1, To: colons[1]}
1553 if index[2] == nil {
1554 p.error(colons[1], "final index required in 3-index slice")
1555 index[2] = &ast.BadExpr{From: colons[1] + 1, To: rbrack}
1558 return &ast.SliceExpr{X: x, Lbrack: lbrack, Low: index[0], High: index[1], Max: index[2], Slice3: slice3, Rbrack: rbrack}
1563 return &ast.IndexExpr{X: x, Lbrack: lbrack, Index: index[0], Rbrack: rbrack}
1566 // instance expression
// PackIndexExpr produces an IndexExpr for one type argument and an
// IndexListExpr for several.
1567 return typeparams.PackIndexExpr(x, lbrack, args, rbrack)
// parseCallOrConversion parses the parenthesized argument list of a call
// fun(args...) — or of a conversion T(x), which is syntactically the same.
// A trailing "..." (variadic call) is recorded in ellipsis and terminates
// argument collection.
1570 func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr {
1572 defer un(trace(p, "CallOrConversion"))
1575 lparen := p.expect(token.LPAREN)
1578 var ellipsis token.Pos
1579 for p.tok != token.RPAREN && p.tok != token.EOF && !ellipsis.IsValid() {
1580 list = append(list, p.parseRhs()) // builtins may expect a type: make(some type, ...)
1581 if p.tok == token.ELLIPSIS {
1585 if !p.atComma("argument list", token.RPAREN) {
1591 rparen := p.expectClosing(token.RPAREN, "argument list")
1593 return &ast.CallExpr{Fun: fun, Lparen: lparen, Args: list, Ellipsis: ellipsis, Rparen: rparen}
// parseValue parses the value part of a composite-literal element:
// either a nested (untyped) literal value `{...}` or an expression.
1596 func (p *parser) parseValue() ast.Expr {
1598 defer un(trace(p, "Element"))
1601 if p.tok == token.LBRACE {
1602 return p.parseLiteralValue(nil)
// parseElement parses one composite-literal element; if a ':' follows
// the first value, the element becomes a KeyValueExpr (key: value).
1610 func (p *parser) parseElement() ast.Expr {
1612 defer un(trace(p, "Element"))
1616 if p.tok == token.COLON {
1619 x = &ast.KeyValueExpr{Key: x, Colon: colon, Value: p.parseValue()}
// parseElementList parses the comma-separated elements of a composite
// literal up to (not including) the closing '}'.
1625 func (p *parser) parseElementList() (list []ast.Expr) {
1627 defer un(trace(p, "ElementList"))
1630 for p.tok != token.RBRACE && p.tok != token.EOF {
1631 list = append(list, p.parseElement())
1632 if !p.atComma("composite literal", token.RBRACE) {
// parseLiteralValue parses the braced value of a composite literal.
// typ is the (already parsed) literal type, or nil for an untyped
// nested literal value.
1641 func (p *parser) parseLiteralValue(typ ast.Expr) ast.Expr {
1643 defer un(trace(p, "LiteralValue"))
1646 lbrace := p.expect(token.LBRACE)
1649 if p.tok != token.RBRACE {
1650 elts = p.parseElementList()
1653 rbrace := p.expectClosing(token.RBRACE, "composite literal")
1654 return &ast.CompositeLit{Type: typ, Lbrace: lbrace, Elts: elts, Rbrace: rbrace}
// parsePrimaryExpr parses a primary expression: an operand followed by
// any number of selectors, type assertions, index/slice/instantiation
// expressions, calls, and composite-literal values. If x is non-nil it
// is used as the already-parsed operand.
1657 func (p *parser) parsePrimaryExpr(x ast.Expr) ast.Expr {
1659 defer un(trace(p, "PrimaryExpr"))
1663 x = p.parseOperand()
1665 // We track the nesting here rather than at the entry for the function,
1666 // since it can iteratively produce a nested output, and we want to
1667 // limit how deep a structure we generate.
1669 defer func() { p.nestLev -= n }()
1677 x = p.parseSelector(x)
1679 x = p.parseTypeAssertion(x)
1682 p.errorExpected(pos, "selector or type assertion")
1683 // TODO(rFindley) The check for token.RBRACE below is a targeted fix
1684 // to error recovery sufficient to make the x/tools tests to
1685 // pass with the new parsing logic introduced for type
1686 // parameters. Remove this once error recovery has been
1687 // more generally reconsidered.
1688 if p.tok != token.RBRACE {
1689 p.next() // make progress
// Recovery: synthesize a "_" selector so the result is still a
// well-formed SelectorExpr.
1691 sel := &ast.Ident{NamePos: pos, Name: "_"}
1692 x = &ast.SelectorExpr{X: x, Sel: sel}
1695 x = p.parseIndexOrSliceOrInstance(x)
1697 x = p.parseCallOrConversion(x)
1699 // operand may have returned a parenthesized complit
1700 // type; accept it but complain if we have a complit
1702 // determine if '{' belongs to a composite literal or a block statement
1704 case *ast.BadExpr, *ast.Ident, *ast.SelectorExpr:
1708 // x is possibly a composite literal type
1709 case *ast.IndexExpr, *ast.IndexListExpr:
1713 // x is possibly a composite literal type
1714 case *ast.ArrayType, *ast.StructType, *ast.MapType:
1715 // x is a composite literal type
1720 p.error(t.Pos(), "cannot parenthesize type in composite literal")
1721 // already progressed, no need to advance
1723 x = p.parseLiteralValue(x)
// parseUnaryExpr parses a unary expression: a prefix operator applied to
// a unary expression, a channel receive (or channel type — disambiguated
// after the operand is parsed), a pointer dereference/type, or a primary
// expression.
1730 func (p *parser) parseUnaryExpr() ast.Expr {
1731 defer decNestLev(incNestLev(p))
1734 defer un(trace(p, "UnaryExpr"))
1738 case token.ADD, token.SUB, token.NOT, token.XOR, token.AND, token.TILDE:
1739 pos, op := p.pos, p.tok
1741 x := p.parseUnaryExpr()
1742 return &ast.UnaryExpr{OpPos: pos, Op: op, X: x}
1745 // channel type or receive expression
1749 // If the next token is token.CHAN we still don't know if it
1750 // is a channel type or a receive operation - we only know
1751 // once we have found the end of the unary expression. There
1754 // <- type => (<-type) must be channel type
1755 // <- expr => <-(expr) is a receive from an expression
1757 // In the first case, the arrow must be re-associated with
1758 // the channel type parsed already:
1760 // <- (chan type) => (<-chan type)
1761 // <- (chan<- type) => (<-chan (<-type))
1763 x := p.parseUnaryExpr()
1765 // determine which case we have
1766 if typ, ok := x.(*ast.ChanType); ok {
1769 // re-associate position info and <-
// Walk the chain of nested channel types, pushing the leading
// arrow inward until a non-SEND direction stops the rewrite.
1771 for ok && dir == ast.SEND {
1772 if typ.Dir == ast.RECV {
1773 // error: (<-type) is (<-(<-chan T))
1774 p.errorExpected(typ.Arrow, "'chan'")
1776 arrow, typ.Begin, typ.Arrow = typ.Arrow, arrow, arrow
1777 dir, typ.Dir = typ.Dir, ast.RECV
1778 typ, ok = typ.Value.(*ast.ChanType)
1780 if dir == ast.SEND {
1781 p.errorExpected(arrow, "channel type")
1788 return &ast.UnaryExpr{OpPos: arrow, Op: token.ARROW, X: x}
1791 // pointer type or unary "*" expression
1794 x := p.parseUnaryExpr()
1795 return &ast.StarExpr{Star: pos, X: x}
1798 return p.parsePrimaryExpr(nil)
// tokPrec returns the current token and its operator precedence.
// In an rhs context a '=' is tolerated (treated like a comparison)
// for better error recovery.
1801 func (p *parser) tokPrec() (token.Token, int) {
1803 if p.inRhs && tok == token.ASSIGN {
1806 return tok, tok.Precedence()
1809 // parseBinaryExpr parses a (possibly) binary expression.
1810 // If x is non-nil, it is used as the left operand.
1812 // TODO(rfindley): parseBinaryExpr has become overloaded. Consider refactoring.
// prec1 is the minimum operator precedence to consume; operators of
// lower precedence terminate the loop (precedence climbing).
1813 func (p *parser) parseBinaryExpr(x ast.Expr, prec1 int) ast.Expr {
1815 defer un(trace(p, "BinaryExpr"))
1819 x = p.parseUnaryExpr()
1821 // We track the nesting here rather than at the entry for the function,
1822 // since it can iteratively produce a nested output, and we want to
1823 // limit how deep a structure we generate.
1825 defer func() { p.nestLev -= n }()
1828 op, oprec := p.tokPrec()
// oprec+1 makes same-precedence operators left-associative.
1833 y := p.parseBinaryExpr(nil, oprec+1)
1834 x = &ast.BinaryExpr{X: x, OpPos: pos, Op: op, Y: y}
// parseExpr parses a full expression at the lowest binary precedence.
1838 // The result may be a type or even a raw type ([...]int).
1839 func (p *parser) parseExpr() ast.Expr {
1841 defer un(trace(p, "Expression"))
1844 return p.parseBinaryExpr(nil, token.LowestPrec+1)
// parseRhs parses a right-hand-side expression.
// NOTE(review): body elided in this listing — presumably sets p.inRhs
// around a parseExpr call; confirm against the full source.
1847 func (p *parser) parseRhs() ast.Expr {
1855 // ----------------------------------------------------------------------------
1858 // Parsing modes for parseSimpleStmt.
1865 // parseSimpleStmt returns true as 2nd result if it parsed the assignment
1866 // of a range clause (with mode == rangeOk). The returned statement is an
1867 // assignment with a right-hand side that is a single unary expression of
1868 // the form "range x". No guarantees are given for the left-hand side.
1869 func (p *parser) parseSimpleStmt(mode int) (ast.Stmt, bool) {
1871 defer un(trace(p, "SimpleStmt"))
1874 x := p.parseList(false)
// Every assignment operator, including ':=' (DEFINE), is handled by
// one case so range clauses and compound assignments share one path.
1878 token.DEFINE, token.ASSIGN, token.ADD_ASSIGN,
1879 token.SUB_ASSIGN, token.MUL_ASSIGN, token.QUO_ASSIGN,
1880 token.REM_ASSIGN, token.AND_ASSIGN, token.OR_ASSIGN,
1881 token.XOR_ASSIGN, token.SHL_ASSIGN, token.SHR_ASSIGN, token.AND_NOT_ASSIGN:
1882 // assignment statement, possibly part of a range clause
1883 pos, tok := p.pos, p.tok
1887 if mode == rangeOk && p.tok == token.RANGE && (tok == token.DEFINE || tok == token.ASSIGN) {
// Range clause: encode "range x" as a unary RANGE expression in Rhs.
1890 y = []ast.Expr{&ast.UnaryExpr{OpPos: pos, Op: token.RANGE, X: p.parseRhs()}}
1893 y = p.parseList(true)
1895 return &ast.AssignStmt{Lhs: x, TokPos: pos, Tok: tok, Rhs: y}, isRange
1899 p.errorExpected(x[0].Pos(), "1 expression")
1900 // continue with first expression
1905 // labeled statement
1908 if label, isIdent := x[0].(*ast.Ident); mode == labelOk && isIdent {
1909 // Go spec: The scope of a label is the body of the function
1910 // in which it is declared and excludes the body of any nested
1912 stmt := &ast.LabeledStmt{Label: label, Colon: colon, Stmt: p.parseStmt()}
1915 // The label declaration typically starts at x[0].Pos(), but the label
1916 // declaration may be erroneous due to a token after that position (and
1917 // before the ':'). If SpuriousErrors is not set, the (only) error
1918 // reported for the line is the illegal label error instead of the token
1919 // before the ':' that caused the problem. Thus, use the (latest) colon
1920 // position for error reporting.
1921 p.error(colon, "illegal label declaration")
1922 return &ast.BadStmt{From: x[0].Pos(), To: colon + 1}, false
1929 return &ast.SendStmt{Chan: x[0], Arrow: arrow, Value: y}, false
1931 case token.INC, token.DEC:
1932 // increment or decrement
1933 s := &ast.IncDecStmt{X: x[0], TokPos: p.pos, Tok: p.tok}
// Fallback: a plain expression statement.
1939 return &ast.ExprStmt{X: x[0]}, false
// parseCallExpr parses the call expression required after "go" or
// "defer"; callType ("go"/"defer") is used in the error messages.
// Returns nil when the expression is not a call (after reporting).
1942 func (p *parser) parseCallExpr(callType string) *ast.CallExpr {
1943 x := p.parseRhs() // could be a conversion: (some type)(x)
1944 if t := ast.Unparen(x); t != x {
1945 p.error(x.Pos(), fmt.Sprintf("expression in %s must not be parenthesized", callType))
1948 if call, isCall := x.(*ast.CallExpr); isCall {
1951 if _, isBad := x.(*ast.BadExpr); !isBad {
1952 // only report error if it's a new one
1953 p.error(p.safePos(x.End()), fmt.Sprintf("expression in %s must be function call", callType))
// parseGoStmt parses a "go" statement; if the operand is not a call,
// a BadStmt spanning just the keyword is returned.
1958 func (p *parser) parseGoStmt() ast.Stmt {
1960 defer un(trace(p, "GoStmt"))
1963 pos := p.expect(token.GO)
1964 call := p.parseCallExpr("go")
1967 return &ast.BadStmt{From: pos, To: pos + 2} // len("go")
1970 return &ast.GoStmt{Go: pos, Call: call}
// parseDeferStmt parses a "defer" statement; mirrors parseGoStmt,
// returning a BadStmt spanning the keyword if the operand is not a call.
1973 func (p *parser) parseDeferStmt() ast.Stmt {
1975 defer un(trace(p, "DeferStmt"))
1978 pos := p.expect(token.DEFER)
1979 call := p.parseCallExpr("defer")
1982 return &ast.BadStmt{From: pos, To: pos + 5} // len("defer")
1985 return &ast.DeferStmt{Defer: pos, Call: call}
// parseReturnStmt parses a "return" statement with an optional
// result list (absent before ';' or '}').
1988 func (p *parser) parseReturnStmt() *ast.ReturnStmt {
1990 defer un(trace(p, "ReturnStmt"))
1994 p.expect(token.RETURN)
1996 if p.tok != token.SEMICOLON && p.tok != token.RBRACE {
1997 x = p.parseList(true)
2001 return &ast.ReturnStmt{Return: pos, Results: x}
// parseBranchStmt parses break, continue, goto, or fallthrough (tok
// selects which). An optional label is allowed for all but fallthrough.
2004 func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt {
2006 defer un(trace(p, "BranchStmt"))
2009 pos := p.expect(tok)
2010 var label *ast.Ident
2011 if tok != token.FALLTHROUGH && p.tok == token.IDENT {
2012 label = p.parseIdent()
2016 return &ast.BranchStmt{TokPos: pos, Tok: tok, Label: label}
// makeExpr converts statement s to the expression it wraps (an
// ExprStmt), or reports an error and returns a BadExpr covering s.
// want describes what was expected (e.g. "boolean expression").
2019 func (p *parser) makeExpr(s ast.Stmt, want string) ast.Expr {
2023 if es, isExpr := s.(*ast.ExprStmt); isExpr {
2026 found := "simple statement"
2027 if _, isAss := s.(*ast.AssignStmt); isAss {
2028 found = "assignment"
2030 p.error(s.Pos(), fmt.Sprintf("expected %s, found %s (missing parentheses around composite literal?)", want, found))
2031 return &ast.BadExpr{From: s.Pos(), To: p.safePos(s.End())}
2034 // parseIfHeader is an adjusted version of parser.header
2035 // in cmd/compile/internal/syntax/parser.go, which has
2036 // been tuned for better error handling.
// It parses the optional init statement and the condition of an "if",
// always producing a non-nil cond (a BadExpr on error).
2037 func (p *parser) parseIfHeader() (init ast.Stmt, cond ast.Expr) {
2038 if p.tok == token.LBRACE {
2039 p.error(p.pos, "missing condition in if statement")
2040 cond = &ast.BadExpr{From: p.pos, To: p.pos}
2043 // p.tok != token.LBRACE
2045 prevLev := p.exprLev
2048 if p.tok != token.SEMICOLON {
2049 // accept potential variable declaration but complain
2050 if p.tok == token.VAR {
2052 p.error(p.pos, "var declaration not allowed in if initializer")
2054 init, _ = p.parseSimpleStmt(basic)
2057 var condStmt ast.Stmt
// semi records the separator between init and condition so the error
// message can distinguish ';' from an implicit newline semicolon.
2060 lit string // ";" or "\n"; valid if pos.IsValid()
2062 if p.tok != token.LBRACE {
2063 if p.tok == token.SEMICOLON {
2068 p.expect(token.SEMICOLON)
2070 if p.tok != token.LBRACE {
2071 condStmt, _ = p.parseSimpleStmt(basic)
2078 if condStmt != nil {
2079 cond = p.makeExpr(condStmt, "boolean expression")
2080 } else if semi.pos.IsValid() {
2081 if semi.lit == "\n" {
2082 p.error(semi.pos, "unexpected newline, expecting { after if clause")
2084 p.error(semi.pos, "missing condition in if statement")
2088 // make sure we have a valid AST
2090 cond = &ast.BadExpr{From: p.pos, To: p.pos}
// parseIfStmt parses an "if" statement including an optional "else"
// branch, which must be another if statement or a block.
2097 func (p *parser) parseIfStmt() *ast.IfStmt {
2098 defer decNestLev(incNestLev(p))
2101 defer un(trace(p, "IfStmt"))
2104 pos := p.expect(token.IF)
2106 init, cond := p.parseIfHeader()
2107 body := p.parseBlockStmt()
2110 if p.tok == token.ELSE {
2114 else_ = p.parseIfStmt()
2116 else_ = p.parseBlockStmt()
2119 p.errorExpected(p.pos, "if statement or block")
2120 else_ = &ast.BadStmt{From: p.pos, To: p.pos}
2126 return &ast.IfStmt{If: pos, Init: init, Cond: cond, Body: body, Else: else_}
// parseCaseClause parses one "case expr, ...:" or "default:" clause of
// a switch statement; List is nil for the default clause.
2129 func (p *parser) parseCaseClause() *ast.CaseClause {
2131 defer un(trace(p, "CaseClause"))
2136 if p.tok == token.CASE {
2138 list = p.parseList(true)
2140 p.expect(token.DEFAULT)
2143 colon := p.expect(token.COLON)
2144 body := p.parseStmtList()
2146 return &ast.CaseClause{Case: pos, List: list, Colon: colon, Body: body}
// isTypeSwitchAssert reports whether x is of the form x.(type),
// i.e. a TypeAssertExpr with nil Type.
2149 func isTypeSwitchAssert(x ast.Expr) bool {
2150 a, ok := x.(*ast.TypeAssertExpr)
2151 return ok && a.Type == nil
// isTypeSwitchGuard reports whether s is a type switch guard:
// either the expression x.(type) or the assignment v := x.(type).
// A plain "=" instead of ":=" is tolerated with an error.
2154 func (p *parser) isTypeSwitchGuard(s ast.Stmt) bool {
2155 switch t := s.(type) {
2158 return isTypeSwitchAssert(t.X)
2159 case *ast.AssignStmt:
2161 if len(t.Lhs) == 1 && len(t.Rhs) == 1 && isTypeSwitchAssert(t.Rhs[0]) {
2164 // permit v = x.(type) but complain
2165 p.error(t.TokPos, "expected ':=', found '='")
// parseSwitchStmt parses an expression switch or a type switch; the
// choice is made after the header by inspecting s2 with isTypeSwitchGuard.
2175 func (p *parser) parseSwitchStmt() ast.Stmt {
2177 defer un(trace(p, "SwitchStmt"))
2180 pos := p.expect(token.SWITCH)
2183 if p.tok != token.LBRACE {
2184 prevLev := p.exprLev
2186 if p.tok != token.SEMICOLON {
2187 s2, _ = p.parseSimpleStmt(basic)
2189 if p.tok == token.SEMICOLON {
2193 if p.tok != token.LBRACE {
2194 // A TypeSwitchGuard may declare a variable in addition
2195 // to the variable declared in the initial SimpleStmt.
2196 // Introduce extra scope to avoid redeclaration errors:
2198 // switch t := 0; t := x.(T) { ... }
2200 // (this code is not valid Go because the first t
2201 // cannot be accessed and thus is never used, the extra
2202 // scope is needed for the correct error message).
2204 // If we don't have a type switch, s2 must be an expression.
2205 // Having the extra nested but empty scope won't affect it.
2206 s2, _ = p.parseSimpleStmt(basic)
2212 typeSwitch := p.isTypeSwitchGuard(s2)
2213 lbrace := p.expect(token.LBRACE)
2215 for p.tok == token.CASE || p.tok == token.DEFAULT {
2216 list = append(list, p.parseCaseClause())
2218 rbrace := p.expect(token.RBRACE)
2220 body := &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
2223 return &ast.TypeSwitchStmt{Switch: pos, Init: s1, Assign: s2, Body: body}
2226 return &ast.SwitchStmt{Switch: pos, Init: s1, Tag: p.makeExpr(s2, "switch expression"), Body: body}
// parseCommClause parses one communication clause of a select
// statement: a send, a receive (possibly with assignment), or default.
2229 func (p *parser) parseCommClause() *ast.CommClause {
2231 defer un(trace(p, "CommClause"))
2236 if p.tok == token.CASE {
2238 lhs := p.parseList(false)
2239 if p.tok == token.ARROW {
// Send statement: exactly one channel expression is allowed on the left.
2242 p.errorExpected(lhs[0].Pos(), "1 expression")
2243 // continue with first expression
2248 comm = &ast.SendStmt{Chan: lhs[0], Arrow: arrow, Value: rhs}
2251 if tok := p.tok; tok == token.ASSIGN || tok == token.DEFINE {
2252 // RecvStmt with assignment
2254 p.errorExpected(lhs[0].Pos(), "1 or 2 expressions")
2255 // continue with first two expressions
2261 comm = &ast.AssignStmt{Lhs: lhs, TokPos: pos, Tok: tok, Rhs: []ast.Expr{rhs}}
2263 // lhs must be single receive operation
2265 p.errorExpected(lhs[0].Pos(), "1 expression")
2266 // continue with first expression
2268 comm = &ast.ExprStmt{X: lhs[0]}
2272 p.expect(token.DEFAULT)
2275 colon := p.expect(token.COLON)
2276 body := p.parseStmtList()
2278 return &ast.CommClause{Case: pos, Comm: comm, Colon: colon, Body: body}
// parseSelectStmt parses a "select" statement: a braced list of
// communication clauses.
2281 func (p *parser) parseSelectStmt() *ast.SelectStmt {
2283 defer un(trace(p, "SelectStmt"))
2286 pos := p.expect(token.SELECT)
2287 lbrace := p.expect(token.LBRACE)
2289 for p.tok == token.CASE || p.tok == token.DEFAULT {
2290 list = append(list, p.parseCommClause())
2292 rbrace := p.expect(token.RBRACE)
2294 body := &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
2296 return &ast.SelectStmt{Select: pos, Body: body}
// parseForStmt parses a "for" statement in any of its forms: condition
// only, init/cond/post, or a range clause; a range clause yields an
// ast.RangeStmt, everything else an ast.ForStmt.
2299 func (p *parser) parseForStmt() ast.Stmt {
2301 defer un(trace(p, "ForStmt"))
2304 pos := p.expect(token.FOR)
2306 var s1, s2, s3 ast.Stmt
2308 if p.tok != token.LBRACE {
2309 prevLev := p.exprLev
2311 if p.tok != token.SEMICOLON {
2312 if p.tok == token.RANGE {
2313 // "for range x" (nil lhs in assignment)
2316 y := []ast.Expr{&ast.UnaryExpr{OpPos: pos, Op: token.RANGE, X: p.parseRhs()}}
2317 s2 = &ast.AssignStmt{Rhs: y}
2320 s2, isRange = p.parseSimpleStmt(rangeOk)
2323 if !isRange && p.tok == token.SEMICOLON {
2327 if p.tok != token.SEMICOLON {
2328 s2, _ = p.parseSimpleStmt(basic)
2331 if p.tok != token.LBRACE {
2332 s3, _ = p.parseSimpleStmt(basic)
2338 body := p.parseBlockStmt()
// Range clause: parseSimpleStmt encoded "range x" as a unary RANGE
// expression in the assignment's Rhs; unpack it into a RangeStmt.
2342 as := s2.(*ast.AssignStmt)
2344 var key, value ast.Expr
2345 switch len(as.Lhs) {
2351 key, value = as.Lhs[0], as.Lhs[1]
2353 p.errorExpected(as.Lhs[len(as.Lhs)-1].Pos(), "at most 2 expressions")
2354 return &ast.BadStmt{From: pos, To: p.safePos(body.End())}
2356 // parseSimpleStmt returned a right-hand side that
2357 // is a single unary expression of the form "range x"
2358 x := as.Rhs[0].(*ast.UnaryExpr).X
2359 return &ast.RangeStmt{
2365 Range: as.Rhs[0].Pos(),
2371 // regular for statement
2372 return &ast.ForStmt{
2375 Cond: p.makeExpr(s2, "boolean or range expression"),
// parseStmt parses a single statement, dispatching on the current
// token; on failure it reports an error, advances to a statement
// start, and returns a BadStmt.
2381 func (p *parser) parseStmt() (s ast.Stmt) {
2382 defer decNestLev(incNestLev(p))
2385 defer un(trace(p, "Statement"))
2389 case token.CONST, token.TYPE, token.VAR:
2390 s = &ast.DeclStmt{Decl: p.parseDecl(stmtStart)}
2392 // tokens that may start an expression
2393 token.IDENT, token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING, token.FUNC, token.LPAREN, // operands
2394 token.LBRACK, token.STRUCT, token.MAP, token.CHAN, token.INTERFACE, // composite types
2395 token.ADD, token.SUB, token.MUL, token.AND, token.XOR, token.ARROW, token.NOT: // unary operators
2396 s, _ = p.parseSimpleStmt(labelOk)
2397 // because of the required look-ahead, labeled statements are
2398 // parsed by parseSimpleStmt - don't expect a semicolon after
2400 if _, isLabeledStmt := s.(*ast.LabeledStmt); !isLabeledStmt {
2406 s = p.parseDeferStmt()
2408 s = p.parseReturnStmt()
2409 case token.BREAK, token.CONTINUE, token.GOTO, token.FALLTHROUGH:
2410 s = p.parseBranchStmt(p.tok)
2412 s = p.parseBlockStmt()
2417 s = p.parseSwitchStmt()
2419 s = p.parseSelectStmt()
2421 s = p.parseForStmt()
2422 case token.SEMICOLON:
2423 // Is it ever possible to have an implicit semicolon
2424 // producing an empty statement in a valid program?
2425 // (handle correctly anyway)
2426 s = &ast.EmptyStmt{Semicolon: p.pos, Implicit: p.lit == "\n"}
2429 // a semicolon may be omitted before a closing "}"
2430 s = &ast.EmptyStmt{Semicolon: p.pos, Implicit: true}
2432 // no statement found
2434 p.errorExpected(pos, "statement")
2435 p.advance(stmtStart)
2436 s = &ast.BadStmt{From: pos, To: p.pos}
2442 // ----------------------------------------------------------------------------
// parseSpecFunction is the signature shared by the spec parsers
// (parseImportSpec, parseValueSpec, parseTypeSpec) used by parseGenDecl;
// iota is the spec's index within a parenthesized group.
2445 type parseSpecFunction func(doc *ast.CommentGroup, keyword token.Token, iota int) ast.Spec
// parseImportSpec parses one import spec: an optional local name
// (identifier or '.') followed by the import path string. The spec is
// also appended to p.imports. The keyword/iota parameters of the
// parseSpecFunction signature are unused here.
2447 func (p *parser) parseImportSpec(doc *ast.CommentGroup, _ token.Token, _ int) ast.Spec {
2449 defer un(trace(p, "ImportSpec"))
2452 var ident *ast.Ident
2455 ident = p.parseIdent()
2457 ident = &ast.Ident{NamePos: p.pos, Name: "."}
2463 if p.tok == token.STRING {
2466 } else if p.tok.IsLiteral() {
2467 p.error(pos, "import path must be a string")
2470 p.error(pos, "missing import path")
2473 comment := p.expectSemi()
2476 spec := &ast.ImportSpec{
2479 Path: &ast.BasicLit{ValuePos: pos, Kind: token.STRING, Value: path},
2482 p.imports = append(p.imports, spec)
// parseValueSpec parses one const or var spec (keyword selects which):
// an identifier list with optional type and optional initializer list.
2487 func (p *parser) parseValueSpec(doc *ast.CommentGroup, keyword token.Token, iota int) ast.Spec {
2489 defer un(trace(p, keyword.String()+"Spec"))
2492 idents := p.parseIdentList()
2494 var values []ast.Expr
2497 // always permit optional type and initialization for more tolerant parsing
2498 if p.tok != token.EOF && p.tok != token.SEMICOLON && p.tok != token.RPAREN {
2499 typ = p.tryIdentOrType()
2500 if p.tok == token.ASSIGN {
2502 values = p.parseList(true)
2506 if p.tok != token.ASSIGN {
2509 if p.tok == token.ASSIGN {
2511 values = p.parseList(true)
// The keyword switch above is exhaustive; reaching here is a bug.
2514 panic("unreachable")
2516 comment := p.expectSemi()
2518 spec := &ast.ValueSpec{
// parseGenericType parses the remainder of a generic type declaration
// after "name [" and a first parameter (name0/typ0, possibly nil) have
// been consumed; it fills in spec.TypeParams and spec.Type.
2528 func (p *parser) parseGenericType(spec *ast.TypeSpec, openPos token.Pos, name0 *ast.Ident, typ0 ast.Expr) {
2530 defer un(trace(p, "parseGenericType"))
2533 list := p.parseParameterList(name0, typ0, token.RBRACK)
2534 closePos := p.expect(token.RBRACK)
2535 spec.TypeParams = &ast.FieldList{Opening: openPos, List: list, Closing: closePos}
2536 // Let the type checker decide whether to accept type parameters on aliases:
2537 // see issue #46477.
2538 if p.tok == token.ASSIGN {
2543 spec.Type = p.parseType()
// parseTypeSpec parses one type spec: a name followed by either a type
// parameter list (generic declaration) or a plain type (possibly an
// alias with '='). The hard part is disambiguating "name [" between an
// array/slice type and a type parameter list; see the inline comments.
2546 func (p *parser) parseTypeSpec(doc *ast.CommentGroup, _ token.Token, _ int) ast.Spec {
2548 defer un(trace(p, "TypeSpec"))
2551 name := p.parseIdent()
2552 spec := &ast.TypeSpec{Doc: doc, Name: name}
2554 if p.tok == token.LBRACK {
2555 // spec.Name "[" ...
2556 // array/slice type or type parameter list
2559 if p.tok == token.IDENT {
2560 // We may have an array type or a type parameter list.
2561 // In either case we expect an expression x (which may
2562 // just be a name, or a more complex expression) which
2563 // we can analyze further.
2565 // A type parameter list may have a type bound starting
2566 // with a "[" as in: P []E. In that case, simply parsing
2567 // an expression would lead to an error: P[] is invalid.
2568 // But since index or slice expressions are never constant
2569 // and thus invalid array length expressions, if the name
2570 // is followed by "[" it must be the start of an array or
2571 // slice constraint. Only if we don't see a "[" do we
2572 // need to parse a full expression. Notably, name <- x
2573 // is not a concern because name <- x is a statement and
2574 // not an expression.
2575 var x ast.Expr = p.parseIdent()
2576 if p.tok != token.LBRACK {
2577 // To parse the expression starting with name, expand
2578 // the call sequence we would get by passing in name
2579 // to parser.expr, and pass in name to parsePrimaryExpr.
2581 lhs := p.parsePrimaryExpr(x)
2582 x = p.parseBinaryExpr(lhs, token.LowestPrec+1)
2585 // Analyze expression x. If we can split x into a type parameter
2586 // name, possibly followed by a type parameter type, we consider
2587 // this the start of a type parameter list, with some caveats:
2588 // a single name followed by "]" tilts the decision towards an
2589 // array declaration; a type parameter type that could also be
2590 // an ordinary expression but which is followed by a comma tilts
2591 // the decision towards a type parameter list.
2592 if pname, ptype := extractName(x, p.tok == token.COMMA); pname != nil && (ptype != nil || p.tok != token.RBRACK) {
2593 // spec.Name "[" pname ...
2594 // spec.Name "[" pname ptype ...
2595 // spec.Name "[" pname ptype "," ...
2596 p.parseGenericType(spec, lbrack, pname, ptype) // ptype may be nil
2598 // spec.Name "[" pname "]" ...
2599 // spec.Name "[" x ...
2600 spec.Type = p.parseArrayType(lbrack, x)
2604 spec.Type = p.parseArrayType(lbrack, nil)
2607 // no type parameters
2608 if p.tok == token.ASSIGN {
2613 spec.Type = p.parseType()
2616 spec.Comment = p.expectSemi()
2621 // extractName splits the expression x into (name, expr) if syntactically
2622 // x can be written as name expr. The split only happens if expr is a type
2623 // element (per the isTypeElem predicate) or if force is set.
2624 // If x is just a name, the result is (name, nil). If the split succeeds,
2625 // the result is (name, expr). Otherwise the result is (nil, x).
2628 // x force name expr
2629 // ------------------------------------
2630 // P*[]int T/F P *[]int
2633 // P([]int) T/F P []int
2636 // P*E|F|~G T/F P *E|F|~G
2637 // P*E|F|G T P *E|F|G
2638 // P*E|F|G F nil P*E|F|G
2639 func extractName(x ast.Expr, force bool) (*ast.Ident, ast.Expr) {
2640 switch x := x.(type) {
2643 case *ast.BinaryExpr:
// "name * Y" reinterpreted as name followed by pointer type *Y.
2646 if name, _ := x.X.(*ast.Ident); name != nil && (force || isTypeElem(x.Y)) {
2648 return name, &ast.StarExpr{Star: x.OpPos, X: x.Y}
2651 if name, lhs := extractName(x.X, force || isTypeElem(x.Y)); name != nil && lhs != nil {
// "name(arg)" reinterpreted as name followed by parenthesized type arg.
2659 if name, _ := x.Fun.(*ast.Ident); name != nil {
2660 if len(x.Args) == 1 && x.Ellipsis == token.NoPos && (force || isTypeElem(x.Args[0])) {
2661 // x = name "(" x.ArgList[0] ")"
2662 return name, x.Args[0]
2669 // isTypeElem reports whether x is a (possibly parenthesized) type element expression.
2670 // The result is false if x could be a type element OR an ordinary (value) expression.
2671 func isTypeElem(x ast.Expr) bool {
2672 switch x := x.(type) {
2673 case *ast.ArrayType, *ast.StructType, *ast.FuncType, *ast.InterfaceType, *ast.MapType, *ast.ChanType:
2675 case *ast.BinaryExpr:
// A union term: either side being a type element makes the whole
// expression one.
2676 return isTypeElem(x.X) || isTypeElem(x.Y)
2677 case *ast.UnaryExpr:
2678 return x.Op == token.TILDE
2679 case *ast.ParenExpr:
2680 return isTypeElem(x.X)
// parseGenDecl parses a general declaration (import, const, type, or
// var): either a single spec or a parenthesized group of specs, each
// parsed by f with its group index as iota.
2685 func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.GenDecl {
2687 defer un(trace(p, "GenDecl("+keyword.String()+")"))
2690 doc := p.leadComment
2691 pos := p.expect(keyword)
2692 var lparen, rparen token.Pos
2694 if p.tok == token.LPAREN {
2697 for iota := 0; p.tok != token.RPAREN && p.tok != token.EOF; iota++ {
2698 list = append(list, f(p.leadComment, keyword, iota))
2700 rparen = p.expect(token.RPAREN)
2703 list = append(list, f(nil, keyword, 0))
2706 return &ast.GenDecl{
// parseFuncDecl parses a function or method declaration: optional
// receiver, name, type parameters (functions only), parameters,
// results, and an optional body.
2716 func (p *parser) parseFuncDecl() *ast.FuncDecl {
2718 defer un(trace(p, "FunctionDecl"))
2721 doc := p.leadComment
2722 pos := p.expect(token.FUNC)
2724 var recv *ast.FieldList
2725 if p.tok == token.LPAREN {
2726 _, recv = p.parseParameters(false)
2729 ident := p.parseIdent()
2731 tparams, params := p.parseParameters(true)
2732 if recv != nil && tparams != nil {
2733 // Method declarations do not have type parameters. We parse them for a
2734 // better error message and improved error recovery.
2735 p.error(tparams.Opening, "method must have no type parameters")
2738 results := p.parseResult()
2740 var body *ast.BlockStmt
2743 body = p.parseBody()
2745 case token.SEMICOLON:
2747 if p.tok == token.LBRACE {
2748 // opening { of function declaration on next line
2749 p.error(p.pos, "unexpected semicolon or newline before {")
2750 body = p.parseBody()
2757 decl := &ast.FuncDecl{
2761 Type: &ast.FuncType{
2763 TypeParams: tparams,
// parseDecl parses a top-level declaration; on an unexpected token it
// reports an error, advances to a token in sync, and returns a BadDecl.
2772 func (p *parser) parseDecl(sync map[token.Token]bool) ast.Decl {
2774 defer un(trace(p, "Declaration"))
2777 var f parseSpecFunction
2780 f = p.parseImportSpec
2782 case token.CONST, token.VAR:
2783 f = p.parseValueSpec
2789 return p.parseFuncDecl()
2793 p.errorExpected(pos, "declaration")
2795 return &ast.BadDecl{From: pos, To: p.pos}
// All non-func declarations funnel through parseGenDecl with the
// spec parser selected above.
2798 return p.parseGenDecl(p.tok, f)
2801 // ----------------------------------------------------------------------------
2804 func (p *parser) parseFile() *ast.File {
2806 defer un(trace(p, "File"))
2809 // Don't bother parsing the rest if we had errors scanning the first token.
2810 // Likely not a Go source file at all.
2811 if p.errors.Len() != 0 {
2816 doc := p.leadComment
2817 pos := p.expect(token.PACKAGE)
2818 // Go spec: The package clause is not a declaration;
2819 // the package name does not appear in any scope.
2820 ident := p.parseIdent()
2821 if ident.Name == "_" && p.mode&DeclarationErrors != 0 {
2822 p.error(p.pos, "invalid package name _")
2826 // Don't bother parsing the rest if we had errors parsing the package clause.
2827 // Likely not a Go source file at all.
2828 if p.errors.Len() != 0 {
2832 var decls []ast.Decl
2833 if p.mode&PackageClauseOnly == 0 {
2835 for p.tok == token.IMPORT {
2836 decls = append(decls, p.parseGenDecl(token.IMPORT, p.parseImportSpec))
2839 if p.mode&ImportsOnly == 0 {
2840 // rest of package body
2841 prev := token.IMPORT
2842 for p.tok != token.EOF {
2843 // Continue to accept import declarations for error tolerance, but complain.
2844 if p.tok == token.IMPORT && prev != token.IMPORT {
2845 p.error(p.pos, "imports must appear before other declarations")
2849 decls = append(decls, p.parseDecl(declStart))
2859 FileStart: token.Pos(p.file.Base()),
2860 FileEnd: token.Pos(p.file.Base() + p.file.Size()),
2862 Comments: p.comments,
2863 GoVersion: p.goVersion,
2865 var declErr func(token.Pos, string)
2866 if p.mode&DeclarationErrors != 0 {
2869 if p.mode&SkipObjectResolution == 0 {
2870 resolveFile(f, p.file, declErr)