--- /dev/null
+// +build ignore
+
+package PACKAGE
} else if importpkg.Name != $2.Name {
Yyerror("conflicting names %s and %s for package %q", importpkg.Name, $2.Name, importpkg.Path);
}
- importpkg.Direct = true;
+ if incannedimport == 0 {
+ importpkg.Direct = true;
+ }
importpkg.Safe = curio.importsafe
if safemode != 0 && !curio.importsafe {
// Do not instrument the following packages at all,
// at best instrumentation would cause infinite recursion.
-var omit_pkgs = []string{"runtime", "runtime/race", "runtime/msan"}
+var omit_pkgs = []string{"runtime/internal/atomic", "runtime", "runtime/race", "runtime/msan"}
// Only insert racefuncenter/racefuncexit into the following packages.
// Memory accesses in the packages are either uninteresting or will cause false positives.
dupok = obj.DUPOK
}
- if compiling_runtime != 0 && (tbase == Types[tbase.Etype] || tbase == bytetype || tbase == runetype || tbase == errortype) { // int, float, etc
+ if localpkg.Name == "runtime" && (tbase == Types[tbase.Etype] || tbase == bytetype || tbase == runetype || tbase == errortype) { // int, float, etc
goto ok
}
// so this is as good as any.
// another possible choice would be package main,
// but using runtime means fewer copies in .6 files.
- if compiling_runtime != 0 {
+ if localpkg.Name == "runtime" {
for i := EType(1); i <= TBOOL; i++ {
dtypesym(Ptrto(Types[i]))
}
const yyErrCode = 2
const yyMaxDepth = 200
-//line go.y:2315
+//line go.y:2317
func fixlbrace(lbr int) {
// If the opening brace was an LBODY,
// set up for another one now that we're done.
} else if importpkg.Name != yyDollar[2].sym.Name {
Yyerror("conflicting names %s and %s for package %q", importpkg.Name, yyDollar[2].sym.Name, importpkg.Path)
}
- importpkg.Direct = true
+ if incannedimport == 0 {
+ importpkg.Direct = true
+ }
importpkg.Safe = curio.importsafe
if safemode != 0 && !curio.importsafe {
}
case 20:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:329
+ //line go.y:331
{
if yyDollar[1].sym.Name == "safe" {
curio.importsafe = true
}
case 21:
yyDollar = yyS[yypt-0 : yypt+1]
- //line go.y:336
+ //line go.y:338
{
defercheckwidth()
}
case 22:
yyDollar = yyS[yypt-4 : yypt+1]
- //line go.y:340
+ //line go.y:342
{
resumecheckwidth()
unimportfile()
}
case 23:
yyDollar = yyS[yypt-0 : yypt+1]
- //line go.y:349
+ //line go.y:351
{
Yyerror("empty top-level declaration")
yyVAL.list = nil
}
case 25:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:355
+ //line go.y:357
{
yyVAL.list = list1(yyDollar[1].node)
}
case 26:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:359
+ //line go.y:361
{
Yyerror("non-declaration statement outside function body")
yyVAL.list = nil
}
case 27:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:364
+ //line go.y:366
{
yyVAL.list = nil
}
case 28:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:370
+ //line go.y:372
{
yyVAL.list = yyDollar[2].list
}
case 29:
yyDollar = yyS[yypt-5 : yypt+1]
- //line go.y:374
+ //line go.y:376
{
yyVAL.list = yyDollar[3].list
}
case 30:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:378
+ //line go.y:380
{
yyVAL.list = nil
}
case 31:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:382
+ //line go.y:384
{
yyVAL.list = yyDollar[2].list
iota_ = -100000
}
case 32:
yyDollar = yyS[yypt-5 : yypt+1]
- //line go.y:388
+ //line go.y:390
{
yyVAL.list = yyDollar[3].list
iota_ = -100000
}
case 33:
yyDollar = yyS[yypt-7 : yypt+1]
- //line go.y:394
+ //line go.y:396
{
yyVAL.list = concat(yyDollar[3].list, yyDollar[5].list)
iota_ = -100000
}
case 34:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:400
+ //line go.y:402
{
yyVAL.list = nil
iota_ = -100000
}
case 35:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:405
+ //line go.y:407
{
yyVAL.list = list1(yyDollar[2].node)
}
case 36:
yyDollar = yyS[yypt-5 : yypt+1]
- //line go.y:409
+ //line go.y:411
{
yyVAL.list = yyDollar[3].list
}
case 37:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:413
+ //line go.y:415
{
yyVAL.list = nil
}
case 38:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:419
+ //line go.y:421
{
iota_ = 0
}
case 39:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:425
+ //line go.y:427
{
yyVAL.list = variter(yyDollar[1].list, yyDollar[2].node, nil)
}
case 40:
yyDollar = yyS[yypt-4 : yypt+1]
- //line go.y:429
+ //line go.y:431
{
yyVAL.list = variter(yyDollar[1].list, yyDollar[2].node, yyDollar[4].list)
}
case 41:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:433
+ //line go.y:435
{
yyVAL.list = variter(yyDollar[1].list, nil, yyDollar[3].list)
}
case 42:
yyDollar = yyS[yypt-4 : yypt+1]
- //line go.y:439
+ //line go.y:441
{
yyVAL.list = constiter(yyDollar[1].list, yyDollar[2].node, yyDollar[4].list)
}
case 43:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:443
+ //line go.y:445
{
yyVAL.list = constiter(yyDollar[1].list, nil, yyDollar[3].list)
}
case 45:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:450
+ //line go.y:452
{
yyVAL.list = constiter(yyDollar[1].list, yyDollar[2].node, nil)
}
case 46:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:454
+ //line go.y:456
{
yyVAL.list = constiter(yyDollar[1].list, nil, nil)
}
case 47:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:460
+ //line go.y:462
{
// different from dclname because the name
// becomes visible right here, not at the end
}
case 48:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:469
+ //line go.y:471
{
yyVAL.node = typedcl1(yyDollar[1].node, yyDollar[2].node, true)
}
case 49:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:475
+ //line go.y:477
{
yyVAL.node = yyDollar[1].node
}
case 50:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:489
+ //line go.y:491
{
yyVAL.node = Nod(OASOP, yyDollar[1].node, yyDollar[3].node)
yyVAL.node.Etype = EType(yyDollar[2].i) // rathole to pass opcode
}
case 51:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:494
+ //line go.y:496
{
if yyDollar[1].list.Next == nil && yyDollar[3].list.Next == nil {
// simple
}
case 52:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:506
+ //line go.y:508
{
if yyDollar[3].list.N.Op == OTYPESW {
yyVAL.node = Nod(OTYPESW, nil, yyDollar[3].list.N.Right)
}
case 53:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:524
+ //line go.y:526
{
yyVAL.node = Nod(OASOP, yyDollar[1].node, Nodintconst(1))
yyVAL.node.Implicit = true
}
case 54:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:531
+ //line go.y:533
{
yyVAL.node = Nod(OASOP, yyDollar[1].node, Nodintconst(1))
yyVAL.node.Implicit = true
}
case 55:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:540
+ //line go.y:542
{
var n, nn *Node
}
case 56:
yyDollar = yyS[yypt-5 : yypt+1]
- //line go.y:563
+ //line go.y:565
{
var n *Node
}
case 57:
yyDollar = yyS[yypt-5 : yypt+1]
- //line go.y:581
+ //line go.y:583
{
// will be converted to OCASE
// right will point to next case
}
case 58:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:590
+ //line go.y:592
{
var n, nn *Node
}
case 59:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:611
+ //line go.y:613
{
markdcl()
}
case 60:
yyDollar = yyS[yypt-4 : yypt+1]
- //line go.y:615
+ //line go.y:617
{
if yyDollar[3].list == nil {
yyVAL.node = Nod(OEMPTY, nil, nil)
}
case 61:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:626
+ //line go.y:628
{
// If the last token read by the lexer was consumed
// as part of the case, clear it (parser has cleared yychar).
}
case 62:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:637
+ //line go.y:639
{
// This is the only place in the language where a statement
// list is not allowed to drop the final semicolon, because
}
case 63:
yyDollar = yyS[yypt-0 : yypt+1]
- //line go.y:656
+ //line go.y:658
{
yyVAL.list = nil
}
case 64:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:660
+ //line go.y:662
{
yyVAL.list = list(yyDollar[1].list, yyDollar[2].node)
}
case 65:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:666
+ //line go.y:668
{
markdcl()
}
case 66:
yyDollar = yyS[yypt-4 : yypt+1]
- //line go.y:670
+ //line go.y:672
{
yyVAL.list = yyDollar[3].list
popdcl()
}
case 67:
yyDollar = yyS[yypt-4 : yypt+1]
- //line go.y:677
+ //line go.y:679
{
yyVAL.node = Nod(ORANGE, nil, yyDollar[4].node)
yyVAL.node.List = yyDollar[1].list
}
case 68:
yyDollar = yyS[yypt-4 : yypt+1]
- //line go.y:683
+ //line go.y:685
{
yyVAL.node = Nod(ORANGE, nil, yyDollar[4].node)
yyVAL.node.List = yyDollar[1].list
}
case 69:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:690
+ //line go.y:692
{
yyVAL.node = Nod(ORANGE, nil, yyDollar[2].node)
yyVAL.node.Etype = 0 // := flag
}
case 70:
yyDollar = yyS[yypt-5 : yypt+1]
- //line go.y:697
+ //line go.y:699
{
// init ; test ; incr
if yyDollar[5].node != nil && yyDollar[5].node.Colas {
}
case 71:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:710
+ //line go.y:712
{
// normal test
yyVAL.node = Nod(OFOR, nil, nil)
}
case 73:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:719
+ //line go.y:721
{
yyVAL.node = yyDollar[1].node
yyVAL.node.Nbody = concat(yyVAL.node.Nbody, yyDollar[2].list)
}
case 74:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:726
+ //line go.y:728
{
markdcl()
}
case 75:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:730
+ //line go.y:732
{
yyVAL.node = yyDollar[3].node
popdcl()
}
case 76:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:737
+ //line go.y:739
{
// test
yyVAL.node = Nod(OIF, nil, nil)
}
case 77:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:743
+ //line go.y:745
{
// init ; test
yyVAL.node = Nod(OIF, nil, nil)
}
case 78:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:755
+ //line go.y:757
{
markdcl()
}
case 79:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:759
+ //line go.y:761
{
if yyDollar[3].node.Left == nil {
Yyerror("missing condition in if statement")
}
case 80:
yyDollar = yyS[yypt-5 : yypt+1]
- //line go.y:765
+ //line go.y:767
{
yyDollar[3].node.Nbody = yyDollar[5].list
}
case 81:
yyDollar = yyS[yypt-8 : yypt+1]
- //line go.y:769
+ //line go.y:771
{
var n *Node
var nn *NodeList
}
case 82:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:787
+ //line go.y:789
{
markdcl()
}
case 83:
yyDollar = yyS[yypt-5 : yypt+1]
- //line go.y:791
+ //line go.y:793
{
if yyDollar[4].node.Left == nil {
Yyerror("missing condition in if statement")
}
case 84:
yyDollar = yyS[yypt-0 : yypt+1]
- //line go.y:800
+ //line go.y:802
{
yyVAL.list = nil
}
case 85:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:804
+ //line go.y:806
{
yyVAL.list = concat(yyDollar[1].list, yyDollar[2].list)
}
case 86:
yyDollar = yyS[yypt-0 : yypt+1]
- //line go.y:809
+ //line go.y:811
{
yyVAL.list = nil
}
case 87:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:813
+ //line go.y:815
{
l := &NodeList{N: yyDollar[2].node}
l.End = l
}
case 88:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:821
+ //line go.y:823
{
markdcl()
}
case 89:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:825
+ //line go.y:827
{
var n *Node
n = yyDollar[3].node.Left
}
case 90:
yyDollar = yyS[yypt-7 : yypt+1]
- //line go.y:834
+ //line go.y:836
{
yyVAL.node = yyDollar[3].node
yyVAL.node.Op = OSWITCH
}
case 91:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:844
+ //line go.y:846
{
typesw = Nod(OXXX, typesw, nil)
}
case 92:
yyDollar = yyS[yypt-5 : yypt+1]
- //line go.y:848
+ //line go.y:850
{
yyVAL.node = Nod(OSELECT, nil, nil)
yyVAL.node.Lineno = typesw.Lineno
}
case 94:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:861
+ //line go.y:863
{
yyVAL.node = Nod(OOROR, yyDollar[1].node, yyDollar[3].node)
}
case 95:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:865
+ //line go.y:867
{
yyVAL.node = Nod(OANDAND, yyDollar[1].node, yyDollar[3].node)
}
case 96:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:869
+ //line go.y:871
{
yyVAL.node = Nod(OEQ, yyDollar[1].node, yyDollar[3].node)
}
case 97:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:873
+ //line go.y:875
{
yyVAL.node = Nod(ONE, yyDollar[1].node, yyDollar[3].node)
}
case 98:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:877
+ //line go.y:879
{
yyVAL.node = Nod(OLT, yyDollar[1].node, yyDollar[3].node)
}
case 99:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:881
+ //line go.y:883
{
yyVAL.node = Nod(OLE, yyDollar[1].node, yyDollar[3].node)
}
case 100:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:885
+ //line go.y:887
{
yyVAL.node = Nod(OGE, yyDollar[1].node, yyDollar[3].node)
}
case 101:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:889
+ //line go.y:891
{
yyVAL.node = Nod(OGT, yyDollar[1].node, yyDollar[3].node)
}
case 102:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:893
+ //line go.y:895
{
yyVAL.node = Nod(OADD, yyDollar[1].node, yyDollar[3].node)
}
case 103:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:897
+ //line go.y:899
{
yyVAL.node = Nod(OSUB, yyDollar[1].node, yyDollar[3].node)
}
case 104:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:901
+ //line go.y:903
{
yyVAL.node = Nod(OOR, yyDollar[1].node, yyDollar[3].node)
}
case 105:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:905
+ //line go.y:907
{
yyVAL.node = Nod(OXOR, yyDollar[1].node, yyDollar[3].node)
}
case 106:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:909
+ //line go.y:911
{
yyVAL.node = Nod(OMUL, yyDollar[1].node, yyDollar[3].node)
}
case 107:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:913
+ //line go.y:915
{
yyVAL.node = Nod(ODIV, yyDollar[1].node, yyDollar[3].node)
}
case 108:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:917
+ //line go.y:919
{
yyVAL.node = Nod(OMOD, yyDollar[1].node, yyDollar[3].node)
}
case 109:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:921
+ //line go.y:923
{
yyVAL.node = Nod(OAND, yyDollar[1].node, yyDollar[3].node)
}
case 110:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:925
+ //line go.y:927
{
yyVAL.node = Nod(OANDNOT, yyDollar[1].node, yyDollar[3].node)
}
case 111:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:929
+ //line go.y:931
{
yyVAL.node = Nod(OLSH, yyDollar[1].node, yyDollar[3].node)
}
case 112:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:933
+ //line go.y:935
{
yyVAL.node = Nod(ORSH, yyDollar[1].node, yyDollar[3].node)
}
case 113:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:938
+ //line go.y:940
{
yyVAL.node = Nod(OSEND, yyDollar[1].node, yyDollar[3].node)
}
case 115:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:945
+ //line go.y:947
{
yyVAL.node = Nod(OIND, yyDollar[2].node, nil)
}
case 116:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:949
+ //line go.y:951
{
if yyDollar[2].node.Op == OCOMPLIT {
// Special case for &T{...}: turn into (*T){...}.
}
case 117:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:960
+ //line go.y:962
{
yyVAL.node = Nod(OPLUS, yyDollar[2].node, nil)
}
case 118:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:964
+ //line go.y:966
{
yyVAL.node = Nod(OMINUS, yyDollar[2].node, nil)
}
case 119:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:968
+ //line go.y:970
{
yyVAL.node = Nod(ONOT, yyDollar[2].node, nil)
}
case 120:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:972
+ //line go.y:974
{
Yyerror("the bitwise complement operator is ^")
yyVAL.node = Nod(OCOM, yyDollar[2].node, nil)
}
case 121:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:977
+ //line go.y:979
{
yyVAL.node = Nod(OCOM, yyDollar[2].node, nil)
}
case 122:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:981
+ //line go.y:983
{
yyVAL.node = Nod(ORECV, yyDollar[2].node, nil)
}
case 123:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:991
+ //line go.y:993
{
yyVAL.node = Nod(OCALL, yyDollar[1].node, nil)
}
case 124:
yyDollar = yyS[yypt-5 : yypt+1]
- //line go.y:995
+ //line go.y:997
{
yyVAL.node = Nod(OCALL, yyDollar[1].node, nil)
yyVAL.node.List = yyDollar[3].list
}
case 125:
yyDollar = yyS[yypt-6 : yypt+1]
- //line go.y:1000
+ //line go.y:1002
{
yyVAL.node = Nod(OCALL, yyDollar[1].node, nil)
yyVAL.node.List = yyDollar[3].list
}
case 126:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:1008
+ //line go.y:1010
{
yyVAL.node = nodlit(yyDollar[1].val)
}
case 128:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:1013
+ //line go.y:1015
{
if yyDollar[1].node.Op == OPACK {
var s *Sym
}
case 129:
yyDollar = yyS[yypt-5 : yypt+1]
- //line go.y:1024
+ //line go.y:1026
{
yyVAL.node = Nod(ODOTTYPE, yyDollar[1].node, yyDollar[4].node)
}
case 130:
yyDollar = yyS[yypt-5 : yypt+1]
- //line go.y:1028
+ //line go.y:1030
{
yyVAL.node = Nod(OTYPESW, nil, yyDollar[1].node)
}
case 131:
yyDollar = yyS[yypt-4 : yypt+1]
- //line go.y:1032
+ //line go.y:1034
{
yyVAL.node = Nod(OINDEX, yyDollar[1].node, yyDollar[3].node)
}
case 132:
yyDollar = yyS[yypt-6 : yypt+1]
- //line go.y:1036
+ //line go.y:1038
{
yyVAL.node = Nod(OSLICE, yyDollar[1].node, Nod(OKEY, yyDollar[3].node, yyDollar[5].node))
}
case 133:
yyDollar = yyS[yypt-8 : yypt+1]
- //line go.y:1040
+ //line go.y:1042
{
if yyDollar[5].node == nil {
Yyerror("middle index required in 3-index slice")
}
case 135:
yyDollar = yyS[yypt-5 : yypt+1]
- //line go.y:1051
+ //line go.y:1053
{
// conversion
yyVAL.node = Nod(OCALL, yyDollar[1].node, nil)
}
case 136:
yyDollar = yyS[yypt-5 : yypt+1]
- //line go.y:1057
+ //line go.y:1059
{
yyVAL.node = yyDollar[3].node
yyVAL.node.Right = yyDollar[1].node
}
case 137:
yyDollar = yyS[yypt-5 : yypt+1]
- //line go.y:1064
+ //line go.y:1066
{
yyVAL.node = yyDollar[3].node
yyVAL.node.Right = yyDollar[1].node
}
case 138:
yyDollar = yyS[yypt-7 : yypt+1]
- //line go.y:1070
+ //line go.y:1072
{
Yyerror("cannot parenthesize type in composite literal")
yyVAL.node = yyDollar[5].node
}
case 140:
yyDollar = yyS[yypt-0 : yypt+1]
- //line go.y:1079
+ //line go.y:1081
{
// composite expression.
// make node early so we get the right line number.
}
case 141:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:1087
+ //line go.y:1089
{
yyVAL.node = Nod(OKEY, yyDollar[1].node, yyDollar[3].node)
}
case 142:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:1093
+ //line go.y:1095
{
// These nodes do not carry line numbers.
// Since a composite literal commonly spans several lines,
}
case 143:
yyDollar = yyS[yypt-4 : yypt+1]
- //line go.y:1106
+ //line go.y:1108
{
yyVAL.node = yyDollar[2].node
yyVAL.node.List = yyDollar[3].list
}
case 145:
yyDollar = yyS[yypt-4 : yypt+1]
- //line go.y:1114
+ //line go.y:1116
{
yyVAL.node = yyDollar[2].node
yyVAL.node.List = yyDollar[3].list
}
case 147:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:1122
+ //line go.y:1124
{
yyVAL.node = yyDollar[2].node
}
case 151:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:1143
+ //line go.y:1145
{
yyVAL.i = LBODY
}
case 152:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:1147
+ //line go.y:1149
{
yyVAL.i = '{'
}
case 153:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:1158
+ //line go.y:1160
{
if yyDollar[1].sym == nil {
yyVAL.node = nil
}
case 154:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:1168
+ //line go.y:1170
{
yyVAL.node = dclname(yyDollar[1].sym)
}
case 155:
yyDollar = yyS[yypt-0 : yypt+1]
- //line go.y:1173
+ //line go.y:1175
{
yyVAL.node = nil
}
case 157:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:1180
+ //line go.y:1182
{
yyVAL.sym = yyDollar[1].sym
// during imports, unqualified non-exported identifiers are from builtinpkg
}
case 159:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:1189
+ //line go.y:1191
{
yyVAL.sym = nil
}
case 160:
yyDollar = yyS[yypt-4 : yypt+1]
- //line go.y:1195
+ //line go.y:1197
{
var p *Pkg
}
case 161:
yyDollar = yyS[yypt-4 : yypt+1]
- //line go.y:1209
+ //line go.y:1211
{
var p *Pkg
}
case 162:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:1225
+ //line go.y:1227
{
yyVAL.node = oldname(yyDollar[1].sym)
if yyVAL.node.Name != nil && yyVAL.node.Name.Pack != nil {
}
case 164:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:1246
+ //line go.y:1248
{
Yyerror("final argument in variadic function missing type")
yyVAL.node = Nod(ODDD, typenod(typ(TINTER)), nil)
}
case 165:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:1251
+ //line go.y:1253
{
yyVAL.node = Nod(ODDD, yyDollar[2].node, nil)
}
case 171:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:1262
+ //line go.y:1264
{
yyVAL.node = yyDollar[2].node
}
case 175:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:1271
+ //line go.y:1273
{
yyVAL.node = Nod(OIND, yyDollar[2].node, nil)
}
case 180:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:1281
+ //line go.y:1283
{
yyVAL.node = yyDollar[2].node
}
case 190:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:1302
+ //line go.y:1304
{
if yyDollar[1].node.Op == OPACK {
var s *Sym
}
case 191:
yyDollar = yyS[yypt-4 : yypt+1]
- //line go.y:1315
+ //line go.y:1317
{
yyVAL.node = Nod(OTARRAY, yyDollar[2].node, yyDollar[4].node)
}
case 192:
yyDollar = yyS[yypt-4 : yypt+1]
- //line go.y:1319
+ //line go.y:1321
{
// array literal of nelem
yyVAL.node = Nod(OTARRAY, Nod(ODDD, nil, nil), yyDollar[4].node)
}
case 193:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:1324
+ //line go.y:1326
{
yyVAL.node = Nod(OTCHAN, yyDollar[2].node, nil)
yyVAL.node.Etype = Cboth
}
case 194:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:1329
+ //line go.y:1331
{
yyVAL.node = Nod(OTCHAN, yyDollar[3].node, nil)
yyVAL.node.Etype = Csend
}
case 195:
yyDollar = yyS[yypt-5 : yypt+1]
- //line go.y:1334
+ //line go.y:1336
{
yyVAL.node = Nod(OTMAP, yyDollar[3].node, yyDollar[5].node)
}
case 198:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:1342
+ //line go.y:1344
{
yyVAL.node = Nod(OIND, yyDollar[2].node, nil)
}
case 199:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:1348
+ //line go.y:1350
{
yyVAL.node = Nod(OTCHAN, yyDollar[3].node, nil)
yyVAL.node.Etype = Crecv
}
case 200:
yyDollar = yyS[yypt-5 : yypt+1]
- //line go.y:1355
+ //line go.y:1357
{
yyVAL.node = Nod(OTSTRUCT, nil, nil)
yyVAL.node.List = yyDollar[3].list
}
case 201:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:1361
+ //line go.y:1363
{
yyVAL.node = Nod(OTSTRUCT, nil, nil)
fixlbrace(yyDollar[2].i)
}
case 202:
yyDollar = yyS[yypt-5 : yypt+1]
- //line go.y:1368
+ //line go.y:1370
{
yyVAL.node = Nod(OTINTER, nil, nil)
yyVAL.node.List = yyDollar[3].list
}
case 203:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:1374
+ //line go.y:1376
{
yyVAL.node = Nod(OTINTER, nil, nil)
fixlbrace(yyDollar[2].i)
}
case 204:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:1385
+ //line go.y:1387
{
yyVAL.node = yyDollar[2].node
if yyVAL.node == nil {
}
case 205:
yyDollar = yyS[yypt-5 : yypt+1]
- //line go.y:1407
+ //line go.y:1409
{
var t *Node
}
case 206:
yyDollar = yyS[yypt-8 : yypt+1]
- //line go.y:1438
+ //line go.y:1440
{
var rcvr, t *Node
}
case 207:
yyDollar = yyS[yypt-5 : yypt+1]
- //line go.y:1476
+ //line go.y:1478
{
var s *Sym
var t *Type
}
case 208:
yyDollar = yyS[yypt-8 : yypt+1]
- //line go.y:1501
+ //line go.y:1503
{
yyVAL.node = methodname1(newname(yyDollar[4].sym), yyDollar[2].list.N.Right)
yyVAL.node.Type = functype(yyDollar[2].list.N, yyDollar[6].list, yyDollar[8].list)
}
case 209:
yyDollar = yyS[yypt-5 : yypt+1]
- //line go.y:1519
+ //line go.y:1521
{
yyDollar[3].list = checkarglist(yyDollar[3].list, 1)
yyVAL.node = Nod(OTFUNC, nil, nil)
}
case 210:
yyDollar = yyS[yypt-0 : yypt+1]
- //line go.y:1527
+ //line go.y:1529
{
yyVAL.list = nil
}
case 211:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:1531
+ //line go.y:1533
{
yyVAL.list = yyDollar[2].list
if yyVAL.list == nil {
}
case 212:
yyDollar = yyS[yypt-0 : yypt+1]
- //line go.y:1540
+ //line go.y:1542
{
yyVAL.list = nil
}
case 213:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:1544
+ //line go.y:1546
{
yyVAL.list = list1(Nod(ODCLFIELD, nil, yyDollar[1].node))
}
case 214:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:1548
+ //line go.y:1550
{
yyDollar[2].list = checkarglist(yyDollar[2].list, 0)
yyVAL.list = yyDollar[2].list
}
case 215:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:1555
+ //line go.y:1557
{
closurehdr(yyDollar[1].node)
}
case 216:
yyDollar = yyS[yypt-4 : yypt+1]
- //line go.y:1561
+ //line go.y:1563
{
yyVAL.node = closurebody(yyDollar[3].list)
fixlbrace(yyDollar[2].i)
}
case 217:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:1566
+ //line go.y:1568
{
yyVAL.node = closurebody(nil)
}
case 218:
yyDollar = yyS[yypt-0 : yypt+1]
- //line go.y:1577
+ //line go.y:1579
{
yyVAL.list = nil
}
case 219:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:1581
+ //line go.y:1583
{
yyVAL.list = concat(yyDollar[1].list, yyDollar[2].list)
if nsyntaxerrors == 0 {
}
case 221:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:1599
+ //line go.y:1601
{
yyVAL.list = concat(yyDollar[1].list, yyDollar[3].list)
}
case 223:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:1606
+ //line go.y:1608
{
yyVAL.list = concat(yyDollar[1].list, yyDollar[3].list)
}
case 224:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:1612
+ //line go.y:1614
{
yyVAL.list = list1(yyDollar[1].node)
}
case 225:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:1616
+ //line go.y:1618
{
yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
}
case 227:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:1623
+ //line go.y:1625
{
yyVAL.list = concat(yyDollar[1].list, yyDollar[3].list)
}
case 228:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:1629
+ //line go.y:1631
{
yyVAL.list = list1(yyDollar[1].node)
}
case 229:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:1633
+ //line go.y:1635
{
yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
}
case 230:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:1639
+ //line go.y:1641
{
var l *NodeList
}
case 231:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:1663
+ //line go.y:1665
{
yyDollar[1].node.SetVal(yyDollar[2].val)
yyVAL.list = list1(yyDollar[1].node)
}
case 232:
yyDollar = yyS[yypt-4 : yypt+1]
- //line go.y:1668
+ //line go.y:1670
{
yyDollar[2].node.SetVal(yyDollar[4].val)
yyVAL.list = list1(yyDollar[2].node)
}
case 233:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:1674
+ //line go.y:1676
{
yyDollar[2].node.Right = Nod(OIND, yyDollar[2].node.Right, nil)
yyDollar[2].node.SetVal(yyDollar[3].val)
}
case 234:
yyDollar = yyS[yypt-5 : yypt+1]
- //line go.y:1680
+ //line go.y:1682
{
yyDollar[3].node.Right = Nod(OIND, yyDollar[3].node.Right, nil)
yyDollar[3].node.SetVal(yyDollar[5].val)
}
case 235:
yyDollar = yyS[yypt-5 : yypt+1]
- //line go.y:1687
+ //line go.y:1689
{
yyDollar[3].node.Right = Nod(OIND, yyDollar[3].node.Right, nil)
yyDollar[3].node.SetVal(yyDollar[5].val)
}
case 236:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:1696
+ //line go.y:1698
{
var n *Node
}
case 237:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:1706
+ //line go.y:1708
{
var pkg *Pkg
}
case 238:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:1721
+ //line go.y:1723
{
yyVAL.node = embedded(yyDollar[1].sym, localpkg)
}
case 239:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:1727
+ //line go.y:1729
{
yyVAL.node = Nod(ODCLFIELD, yyDollar[1].node, yyDollar[2].node)
ifacedcl(yyVAL.node)
}
case 240:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:1732
+ //line go.y:1734
{
yyVAL.node = Nod(ODCLFIELD, nil, oldname(yyDollar[1].sym))
}
case 241:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:1736
+ //line go.y:1738
{
yyVAL.node = Nod(ODCLFIELD, nil, oldname(yyDollar[2].sym))
Yyerror("cannot parenthesize embedded type")
}
case 242:
yyDollar = yyS[yypt-4 : yypt+1]
- //line go.y:1743
+ //line go.y:1745
{
// without func keyword
yyDollar[2].list = checkarglist(yyDollar[2].list, 1)
}
case 244:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:1757
+ //line go.y:1759
{
yyVAL.node = Nod(ONONAME, nil, nil)
yyVAL.node.Sym = yyDollar[1].sym
}
case 245:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:1763
+ //line go.y:1765
{
yyVAL.node = Nod(ONONAME, nil, nil)
yyVAL.node.Sym = yyDollar[1].sym
}
case 247:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:1772
+ //line go.y:1774
{
yyVAL.list = list1(yyDollar[1].node)
}
case 248:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:1776
+ //line go.y:1778
{
yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
}
case 249:
yyDollar = yyS[yypt-0 : yypt+1]
- //line go.y:1781
+ //line go.y:1783
{
yyVAL.list = nil
}
case 250:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:1785
+ //line go.y:1787
{
yyVAL.list = yyDollar[1].list
}
case 251:
yyDollar = yyS[yypt-0 : yypt+1]
- //line go.y:1793
+ //line go.y:1795
{
yyVAL.node = nil
}
case 253:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:1798
+ //line go.y:1800
{
yyVAL.node = liststmt(yyDollar[1].list)
}
case 255:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:1803
+ //line go.y:1805
{
yyVAL.node = nil
}
case 261:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:1814
+ //line go.y:1816
{
yyDollar[1].node = Nod(OLABEL, yyDollar[1].node, nil)
yyDollar[1].node.Sym = dclstack // context, for goto restrictions
}
case 262:
yyDollar = yyS[yypt-4 : yypt+1]
- //line go.y:1819
+ //line go.y:1821
{
var l *NodeList
}
case 263:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:1830
+ //line go.y:1832
{
// will be converted to OFALL
yyVAL.node = Nod(OXFALL, nil, nil)
}
case 264:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:1836
+ //line go.y:1838
{
yyVAL.node = Nod(OBREAK, yyDollar[2].node, nil)
}
case 265:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:1840
+ //line go.y:1842
{
yyVAL.node = Nod(OCONTINUE, yyDollar[2].node, nil)
}
case 266:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:1844
+ //line go.y:1846
{
yyVAL.node = Nod(OPROC, yyDollar[2].node, nil)
}
case 267:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:1848
+ //line go.y:1850
{
yyVAL.node = Nod(ODEFER, yyDollar[2].node, nil)
}
case 268:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:1852
+ //line go.y:1854
{
yyVAL.node = Nod(OGOTO, yyDollar[2].node, nil)
yyVAL.node.Sym = dclstack // context, for goto restrictions
}
case 269:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:1857
+ //line go.y:1859
{
yyVAL.node = Nod(ORETURN, nil, nil)
yyVAL.node.List = yyDollar[2].list
}
case 270:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:1879
+ //line go.y:1881
{
yyVAL.list = nil
if yyDollar[1].node != nil {
}
case 271:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:1886
+ //line go.y:1888
{
yyVAL.list = yyDollar[1].list
if yyDollar[3].node != nil {
}
case 272:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:1895
+ //line go.y:1897
{
yyVAL.list = list1(yyDollar[1].node)
}
case 273:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:1899
+ //line go.y:1901
{
yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
}
case 274:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:1905
+ //line go.y:1907
{
yyVAL.list = list1(yyDollar[1].node)
}
case 275:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:1909
+ //line go.y:1911
{
yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
}
case 276:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:1915
+ //line go.y:1917
{
yyVAL.list = list1(yyDollar[1].node)
}
case 277:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:1919
+ //line go.y:1921
{
yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
}
case 278:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:1925
+ //line go.y:1927
{
yyVAL.list = list1(yyDollar[1].node)
}
case 279:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:1929
+ //line go.y:1931
{
yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
}
case 280:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:1938
+ //line go.y:1940
{
yyVAL.list = list1(yyDollar[1].node)
}
case 281:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:1942
+ //line go.y:1944
{
yyVAL.list = list1(yyDollar[1].node)
}
case 282:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:1946
+ //line go.y:1948
{
yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
}
case 283:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:1950
+ //line go.y:1952
{
yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
}
case 284:
yyDollar = yyS[yypt-0 : yypt+1]
- //line go.y:1955
+ //line go.y:1957
{
yyVAL.list = nil
}
case 285:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:1959
+ //line go.y:1961
{
yyVAL.list = yyDollar[1].list
}
case 290:
yyDollar = yyS[yypt-0 : yypt+1]
- //line go.y:1973
+ //line go.y:1975
{
yyVAL.node = nil
}
case 292:
yyDollar = yyS[yypt-0 : yypt+1]
- //line go.y:1979
+ //line go.y:1981
{
yyVAL.list = nil
}
case 294:
yyDollar = yyS[yypt-0 : yypt+1]
- //line go.y:1985
+ //line go.y:1987
{
yyVAL.node = nil
}
case 296:
yyDollar = yyS[yypt-0 : yypt+1]
- //line go.y:1991
+ //line go.y:1993
{
yyVAL.list = nil
}
case 298:
yyDollar = yyS[yypt-0 : yypt+1]
- //line go.y:1997
+ //line go.y:1999
{
yyVAL.list = nil
}
case 300:
yyDollar = yyS[yypt-0 : yypt+1]
- //line go.y:2003
+ //line go.y:2005
{
yyVAL.list = nil
}
case 302:
yyDollar = yyS[yypt-0 : yypt+1]
- //line go.y:2009
+ //line go.y:2011
{
yyVAL.val.U = nil
}
case 304:
yyDollar = yyS[yypt-4 : yypt+1]
- //line go.y:2019
+ //line go.y:2021
{
importimport(yyDollar[2].sym, yyDollar[3].val.U.(string))
}
case 305:
yyDollar = yyS[yypt-4 : yypt+1]
- //line go.y:2023
+ //line go.y:2025
{
importvar(yyDollar[2].sym, yyDollar[3].typ)
}
case 306:
yyDollar = yyS[yypt-5 : yypt+1]
- //line go.y:2027
+ //line go.y:2029
{
importconst(yyDollar[2].sym, Types[TIDEAL], yyDollar[4].node)
}
case 307:
yyDollar = yyS[yypt-6 : yypt+1]
- //line go.y:2031
+ //line go.y:2033
{
importconst(yyDollar[2].sym, yyDollar[3].typ, yyDollar[5].node)
}
case 308:
yyDollar = yyS[yypt-4 : yypt+1]
- //line go.y:2035
+ //line go.y:2037
{
importtype(yyDollar[2].typ, yyDollar[3].typ)
}
case 309:
yyDollar = yyS[yypt-4 : yypt+1]
- //line go.y:2039
+ //line go.y:2041
{
if yyDollar[2].node == nil {
dclcontext = PEXTERN // since we skip the funcbody below
}
case 310:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:2060
+ //line go.y:2062
{
yyVAL.sym = yyDollar[1].sym
structpkg = yyVAL.sym.Pkg
}
case 311:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:2067
+ //line go.y:2069
{
yyVAL.typ = pkgtype(yyDollar[1].sym)
importsym(yyDollar[1].sym, OTYPE)
}
case 317:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:2087
+ //line go.y:2089
{
yyVAL.typ = pkgtype(yyDollar[1].sym)
}
case 318:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:2091
+ //line go.y:2093
{
// predefined name like uint8
yyDollar[1].sym = Pkglookup(yyDollar[1].sym.Name, builtinpkg)
}
case 319:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:2102
+ //line go.y:2104
{
yyVAL.typ = aindex(nil, yyDollar[3].typ)
}
case 320:
yyDollar = yyS[yypt-4 : yypt+1]
- //line go.y:2106
+ //line go.y:2108
{
yyVAL.typ = aindex(nodlit(yyDollar[2].val), yyDollar[4].typ)
}
case 321:
yyDollar = yyS[yypt-5 : yypt+1]
- //line go.y:2110
+ //line go.y:2112
{
yyVAL.typ = maptype(yyDollar[3].typ, yyDollar[5].typ)
}
case 322:
yyDollar = yyS[yypt-4 : yypt+1]
- //line go.y:2114
+ //line go.y:2116
{
yyVAL.typ = tostruct(yyDollar[3].list)
}
case 323:
yyDollar = yyS[yypt-4 : yypt+1]
- //line go.y:2118
+ //line go.y:2120
{
yyVAL.typ = tointerface(yyDollar[3].list)
}
case 324:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:2122
+ //line go.y:2124
{
yyVAL.typ = Ptrto(yyDollar[2].typ)
}
case 325:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:2126
+ //line go.y:2128
{
yyVAL.typ = typ(TCHAN)
yyVAL.typ.Type = yyDollar[2].typ
}
case 326:
yyDollar = yyS[yypt-4 : yypt+1]
- //line go.y:2132
+ //line go.y:2134
{
yyVAL.typ = typ(TCHAN)
yyVAL.typ.Type = yyDollar[3].typ
}
case 327:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:2138
+ //line go.y:2140
{
yyVAL.typ = typ(TCHAN)
yyVAL.typ.Type = yyDollar[3].typ
}
case 328:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:2146
+ //line go.y:2148
{
yyVAL.typ = typ(TCHAN)
yyVAL.typ.Type = yyDollar[3].typ
}
case 329:
yyDollar = yyS[yypt-5 : yypt+1]
- //line go.y:2154
+ //line go.y:2156
{
yyVAL.typ = functype(nil, yyDollar[3].list, yyDollar[5].list)
}
case 330:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:2160
+ //line go.y:2162
{
yyVAL.node = Nod(ODCLFIELD, nil, typenod(yyDollar[2].typ))
if yyDollar[1].sym != nil {
}
case 331:
yyDollar = yyS[yypt-4 : yypt+1]
- //line go.y:2168
+ //line go.y:2170
{
var t *Type
}
case 332:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:2185
+ //line go.y:2187
{
var s *Sym
var p *Pkg
}
case 333:
yyDollar = yyS[yypt-5 : yypt+1]
- //line go.y:2209
+ //line go.y:2211
{
yyVAL.node = Nod(ODCLFIELD, newname(yyDollar[1].sym), typenod(functype(fakethis(), yyDollar[3].list, yyDollar[5].list)))
}
case 334:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:2213
+ //line go.y:2215
{
yyVAL.node = Nod(ODCLFIELD, nil, typenod(yyDollar[1].typ))
}
case 335:
yyDollar = yyS[yypt-0 : yypt+1]
- //line go.y:2218
+ //line go.y:2220
{
yyVAL.list = nil
}
case 337:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:2225
+ //line go.y:2227
{
yyVAL.list = yyDollar[2].list
}
case 338:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:2229
+ //line go.y:2231
{
yyVAL.list = list1(Nod(ODCLFIELD, nil, typenod(yyDollar[1].typ)))
}
case 339:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:2239
+ //line go.y:2241
{
yyVAL.node = nodlit(yyDollar[1].val)
}
case 340:
yyDollar = yyS[yypt-2 : yypt+1]
- //line go.y:2243
+ //line go.y:2245
{
yyVAL.node = nodlit(yyDollar[2].val)
switch yyVAL.node.Val().Ctype() {
}
case 341:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:2261
+ //line go.y:2263
{
yyVAL.node = oldname(Pkglookup(yyDollar[1].sym.Name, builtinpkg))
if yyVAL.node.Op != OLITERAL {
}
case 343:
yyDollar = yyS[yypt-5 : yypt+1]
- //line go.y:2271
+ //line go.y:2273
{
if yyDollar[2].node.Val().Ctype() == CTRUNE && yyDollar[4].node.Val().Ctype() == CTINT {
yyVAL.node = yyDollar[2].node
}
case 346:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:2287
+ //line go.y:2289
{
yyVAL.list = list1(yyDollar[1].node)
}
case 347:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:2291
+ //line go.y:2293
{
yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
}
case 348:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:2297
+ //line go.y:2299
{
yyVAL.list = list1(yyDollar[1].node)
}
case 349:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:2301
+ //line go.y:2303
{
yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
}
case 350:
yyDollar = yyS[yypt-1 : yypt+1]
- //line go.y:2307
+ //line go.y:2309
{
yyVAL.list = list1(yyDollar[1].node)
}
case 351:
yyDollar = yyS[yypt-3 : yypt+1]
- //line go.y:2311
+ //line go.y:2313
{
yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
}
package main
var builddeps = map[string][]string{
- "bufio": {"bytes", "errors", "io", "runtime", "sync", "sync/atomic", "unicode", "unicode/utf8"},
- "bytes": {"errors", "io", "runtime", "sync", "sync/atomic", "unicode", "unicode/utf8"},
- "container/heap": {"runtime", "sort"},
- "crypto": {"errors", "hash", "io", "math", "runtime", "strconv", "sync", "sync/atomic", "unicode/utf8"},
- "crypto/sha1": {"crypto", "errors", "hash", "io", "math", "runtime", "strconv", "sync", "sync/atomic", "unicode/utf8"},
- "debug/dwarf": {"encoding/binary", "errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "path", "reflect", "runtime", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
- "debug/elf": {"bytes", "debug/dwarf", "encoding/binary", "errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "path", "reflect", "runtime", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
- "encoding": {"runtime"},
- "encoding/base64": {"errors", "io", "math", "runtime", "strconv", "sync", "sync/atomic", "unicode/utf8"},
- "encoding/binary": {"errors", "io", "math", "reflect", "runtime", "strconv", "sync", "sync/atomic", "unicode/utf8"},
- "encoding/json": {"bytes", "encoding", "encoding/base64", "errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
- "errors": {"runtime"},
- "flag": {"errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "sort", "strconv", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"},
- "fmt": {"errors", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "strconv", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"},
- "go/ast": {"bytes", "errors", "fmt", "go/scanner", "go/token", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "path/filepath", "reflect", "runtime", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
- "go/build": {"bufio", "bytes", "errors", "fmt", "go/ast", "go/doc", "go/parser", "go/scanner", "go/token", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "io/ioutil", "log", "math", "net/url", "os", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"},
- "go/doc": {"bytes", "errors", "fmt", "go/ast", "go/scanner", "go/token", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "io/ioutil", "math", "net/url", "os", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"},
- "go/parser": {"bytes", "errors", "fmt", "go/ast", "go/scanner", "go/token", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "io/ioutil", "math", "os", "path/filepath", "reflect", "runtime", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
- "go/scanner": {"bytes", "errors", "fmt", "go/token", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "path/filepath", "reflect", "runtime", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
- "go/token": {"errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "sort", "strconv", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"},
- "hash": {"errors", "io", "runtime", "sync", "sync/atomic"},
- "internal/singleflight": {"runtime", "sync", "sync/atomic"},
- "internal/syscall/windows": {"errors", "runtime", "sync", "sync/atomic", "syscall", "unicode/utf16"},
- "internal/syscall/windows/registry": {"errors", "io", "runtime", "sync", "sync/atomic", "syscall", "unicode/utf16"},
- "io": {"errors", "runtime", "sync", "sync/atomic"},
- "io/ioutil": {"bytes", "errors", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "path/filepath", "runtime", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
- "log": {"errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "strconv", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"},
- "math": {"runtime"},
- "net/url": {"bytes", "errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
- "os": {"errors", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "runtime", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"},
- "os/exec": {"bytes", "errors", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "path/filepath", "runtime", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
- "os/signal": {"errors", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "os", "runtime", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"},
- "path": {"errors", "io", "runtime", "strings", "sync", "sync/atomic", "unicode", "unicode/utf8"},
- "path/filepath": {"bytes", "errors", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "os", "runtime", "sort", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
- "reflect": {"errors", "math", "runtime", "strconv", "sync", "sync/atomic", "unicode/utf8"},
- "regexp": {"bytes", "errors", "io", "math", "regexp/syntax", "runtime", "sort", "strconv", "strings", "sync", "sync/atomic", "unicode", "unicode/utf8"},
- "regexp/syntax": {"bytes", "errors", "io", "math", "runtime", "sort", "strconv", "strings", "sync", "sync/atomic", "unicode", "unicode/utf8"},
- "runtime": {},
- "sort": {"runtime"},
- "strconv": {"errors", "math", "runtime", "unicode/utf8"},
- "strings": {"errors", "io", "runtime", "sync", "sync/atomic", "unicode", "unicode/utf8"},
- "sync": {"runtime", "sync/atomic"},
- "sync/atomic": {"runtime"},
- "syscall": {"errors", "runtime", "sync", "sync/atomic", "unicode/utf16"},
- "text/template": {"bytes", "errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "io/ioutil", "math", "net/url", "os", "path/filepath", "reflect", "runtime", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"},
- "text/template/parse": {"bytes", "errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
- "time": {"errors", "internal/syscall/windows/registry", "io", "runtime", "sync", "sync/atomic", "syscall", "unicode/utf16"},
- "unicode": {"runtime"},
- "unicode/utf16": {"runtime"},
- "unicode/utf8": {"runtime"},
- "cmd/go": {"bufio", "bytes", "container/heap", "crypto", "crypto/sha1", "debug/dwarf", "debug/elf", "encoding", "encoding/base64", "encoding/binary", "encoding/json", "errors", "flag", "fmt", "go/ast", "go/build", "go/doc", "go/parser", "go/scanner", "go/token", "hash", "internal/singleflight", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "io/ioutil", "log", "math", "net/url", "os", "os/exec", "os/signal", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+ "bufio": {"bytes", "errors", "io", "runtime", "runtime/internal/atomic", "sync", "sync/atomic", "unicode", "unicode/utf8"},
+ "bytes": {"errors", "io", "runtime", "runtime/internal/atomic", "sync", "sync/atomic", "unicode", "unicode/utf8"},
+ "container/heap": {"runtime", "runtime/internal/atomic", "sort"},
+ "crypto": {"errors", "hash", "io", "math", "runtime", "runtime/internal/atomic", "strconv", "sync", "sync/atomic", "unicode/utf8"},
+ "crypto/sha1": {"crypto", "errors", "hash", "io", "math", "runtime", "runtime/internal/atomic", "strconv", "sync", "sync/atomic", "unicode/utf8"},
+ "debug/dwarf": {"encoding/binary", "errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "path", "reflect", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+ "debug/elf": {"bytes", "debug/dwarf", "encoding/binary", "errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "path", "reflect", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+ "encoding": {"runtime", "runtime/internal/atomic"},
+ "encoding/base64": {"errors", "io", "math", "runtime", "runtime/internal/atomic", "strconv", "sync", "sync/atomic", "unicode/utf8"},
+ "encoding/binary": {"errors", "io", "math", "reflect", "runtime", "runtime/internal/atomic", "strconv", "sync", "sync/atomic", "unicode/utf8"},
+ "encoding/json": {"bytes", "encoding", "encoding/base64", "errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+ "errors": {"runtime", "runtime/internal/atomic"},
+ "flag": {"errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "sort", "strconv", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"},
+ "fmt": {"errors", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "strconv", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"},
+ "go/ast": {"bytes", "errors", "fmt", "go/scanner", "go/token", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "path/filepath", "reflect", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+ "go/build": {"bufio", "bytes", "errors", "fmt", "go/ast", "go/doc", "go/parser", "go/scanner", "go/token", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "io/ioutil", "log", "math", "net/url", "os", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+ "go/doc": {"bytes", "errors", "fmt", "go/ast", "go/scanner", "go/token", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "io/ioutil", "math", "net/url", "os", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+ "go/parser": {"bytes", "errors", "fmt", "go/ast", "go/scanner", "go/token", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "io/ioutil", "math", "os", "path/filepath", "reflect", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+ "go/scanner": {"bytes", "errors", "fmt", "go/token", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "path/filepath", "reflect", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+ "go/token": {"errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "sort", "strconv", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"},
+ "hash": {"errors", "io", "runtime", "runtime/internal/atomic", "sync", "sync/atomic"},
+ "internal/singleflight": {"runtime", "runtime/internal/atomic", "sync", "sync/atomic"},
+ "internal/syscall/windows": {"errors", "runtime", "runtime/internal/atomic", "sync", "sync/atomic", "syscall", "unicode/utf16"},
+ "internal/syscall/windows/registry": {"errors", "io", "runtime", "runtime/internal/atomic", "sync", "sync/atomic", "syscall", "unicode/utf16"},
+ "io": {"errors", "runtime", "runtime/internal/atomic", "sync", "sync/atomic"},
+ "io/ioutil": {"bytes", "errors", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "path/filepath", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+ "log": {"errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "strconv", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"},
+ "math": {"runtime", "runtime/internal/atomic"},
+ "net/url": {"bytes", "errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+ "os": {"errors", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "runtime", "runtime/internal/atomic", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"},
+ "os/exec": {"bytes", "errors", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "path/filepath", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+ "os/signal": {"errors", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "os", "runtime", "runtime/internal/atomic", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"},
+ "path": {"errors", "io", "runtime", "runtime/internal/atomic", "strings", "sync", "sync/atomic", "unicode", "unicode/utf8"},
+ "path/filepath": {"errors", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "os", "runtime", "runtime/internal/atomic", "sort", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+ "reflect": {"errors", "math", "runtime", "runtime/internal/atomic", "strconv", "sync", "sync/atomic", "unicode/utf8"},
+ "regexp": {"bytes", "errors", "io", "math", "regexp/syntax", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "unicode", "unicode/utf8"},
+ "regexp/syntax": {"bytes", "errors", "io", "math", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "unicode", "unicode/utf8"},
+ "runtime": {"runtime/internal/atomic"},
+ "runtime/internal/atomic": {},
+ "sort": {"runtime", "runtime/internal/atomic"},
+ "strconv": {"errors", "math", "runtime", "runtime/internal/atomic", "unicode/utf8"},
+ "strings": {"errors", "io", "runtime", "runtime/internal/atomic", "sync", "sync/atomic", "unicode", "unicode/utf8"},
+ "sync": {"runtime", "runtime/internal/atomic", "sync/atomic"},
+ "sync/atomic": {"runtime", "runtime/internal/atomic"},
+ "syscall": {"errors", "runtime", "runtime/internal/atomic", "sync", "sync/atomic", "unicode/utf16"},
+ "text/template": {"bytes", "errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "io/ioutil", "math", "net/url", "os", "path/filepath", "reflect", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+ "text/template/parse": {"bytes", "errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+ "time": {"errors", "internal/syscall/windows/registry", "io", "runtime", "runtime/internal/atomic", "sync", "sync/atomic", "syscall", "unicode/utf16"},
+ "unicode": {"runtime", "runtime/internal/atomic"},
+ "unicode/utf16": {"runtime", "runtime/internal/atomic"},
+ "unicode/utf8": {"runtime", "runtime/internal/atomic"},
+ "cmd/go": {"bufio", "bytes", "container/heap", "crypto", "crypto/sha1", "debug/dwarf", "debug/elf", "encoding", "encoding/base64", "encoding/binary", "encoding/json", "errors", "flag", "fmt", "go/ast", "go/build", "go/doc", "go/parser", "go/scanner", "go/token", "hash", "internal/singleflight", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "io/ioutil", "log", "math", "net/url", "os", "os/exec", "os/signal", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"},
}
if p.Name == "main" {
gcargs[1] = "main"
}
- if p.Standard && p.ImportPath == "runtime" {
+ if p.Standard && (p.ImportPath == "runtime" || strings.HasPrefix(p.ImportPath, "runtime/internal")) {
// runtime compiles with a special gc flag to emit
// additional reflect type data.
gcargs = append(gcargs, "-+")
importPaths = append(importPaths, "runtime/cgo")
}
- // Everything depends on runtime, except runtime and unsafe.
- if !p.Standard || (p.ImportPath != "runtime" && p.ImportPath != "unsafe") {
+ // Everything depends on runtime, except runtime, its subpackages, and unsafe.
+ if !p.Standard || (p.ImportPath != "runtime" && !strings.HasPrefix(p.ImportPath, "runtime/") && p.ImportPath != "unsafe") {
importPaths = append(importPaths, "runtime")
// When race detection enabled everything depends on runtime/race.
// Exclude certain packages to avoid circular dependencies.
//
var pkgDeps = map[string][]string{
// L0 is the lowest level, core, nearly unavoidable packages.
- "errors": {},
- "io": {"errors", "sync"},
- "runtime": {"unsafe"},
+ "errors": {},
+ "io": {"errors", "sync"},
+ "runtime": {"unsafe", "runtime/internal/atomic"},
+ "runtime/internal/atomic": {"unsafe"},
"sync": {"runtime", "sync/atomic", "unsafe"},
"sync/atomic": {"unsafe"},
"unsafe": {},
"errors",
"io",
"runtime",
+ "runtime/internal/atomic",
"sync",
"sync/atomic",
"unsafe",
CALLFN(·call536870912, 536870912)
CALLFN(·call1073741824, 1073741824)
-// bool cas(int32 *val, int32 old, int32 new)
-// Atomically:
-// if(*val == old){
-// *val = new;
-// return 1;
-// }else
-// return 0;
-TEXT runtime·cas(SB), NOSPLIT, $0-13
- MOVL ptr+0(FP), BX
- MOVL old+4(FP), AX
- MOVL new+8(FP), CX
- LOCK
- CMPXCHGL CX, 0(BX)
- SETEQ ret+12(FP)
- RET
-
-TEXT runtime·casuintptr(SB), NOSPLIT, $0-13
- JMP runtime·cas(SB)
-
-TEXT runtime·atomicloaduintptr(SB), NOSPLIT, $0-8
- JMP runtime·atomicload(SB)
-
-TEXT runtime·atomicloaduint(SB), NOSPLIT, $0-8
- JMP runtime·atomicload(SB)
-
-TEXT runtime·atomicstoreuintptr(SB), NOSPLIT, $0-8
- JMP runtime·atomicstore(SB)
-
-// bool runtime·cas64(uint64 *val, uint64 old, uint64 new)
-// Atomically:
-// if(*val == *old){
-// *val = new;
-// return 1;
-// } else {
-// return 0;
-// }
-TEXT runtime·cas64(SB), NOSPLIT, $0-21
- MOVL ptr+0(FP), BP
- MOVL old_lo+4(FP), AX
- MOVL old_hi+8(FP), DX
- MOVL new_lo+12(FP), BX
- MOVL new_hi+16(FP), CX
- LOCK
- CMPXCHG8B 0(BP)
- SETEQ ret+20(FP)
- RET
-
-// bool casp(void **p, void *old, void *new)
-// Atomically:
-// if(*p == old){
-// *p = new;
-// return 1;
-// }else
-// return 0;
-TEXT runtime·casp1(SB), NOSPLIT, $0-13
- MOVL ptr+0(FP), BX
- MOVL old+4(FP), AX
- MOVL new+8(FP), CX
- LOCK
- CMPXCHGL CX, 0(BX)
- SETEQ ret+12(FP)
- RET
-
-// uint32 xadd(uint32 volatile *val, int32 delta)
-// Atomically:
-// *val += delta;
-// return *val;
-TEXT runtime·xadd(SB), NOSPLIT, $0-12
- MOVL ptr+0(FP), BX
- MOVL delta+4(FP), AX
- MOVL AX, CX
- LOCK
- XADDL AX, 0(BX)
- ADDL CX, AX
- MOVL AX, ret+8(FP)
- RET
-
-TEXT runtime·xchg(SB), NOSPLIT, $0-12
- MOVL ptr+0(FP), BX
- MOVL new+4(FP), AX
- XCHGL AX, 0(BX)
- MOVL AX, ret+8(FP)
- RET
-
-TEXT runtime·xchguintptr(SB), NOSPLIT, $0-12
- JMP runtime·xchg(SB)
-
TEXT runtime·procyield(SB),NOSPLIT,$0-0
MOVL cycles+0(FP), AX
again:
JNZ again
RET
-TEXT runtime·atomicstorep1(SB), NOSPLIT, $0-8
- MOVL ptr+0(FP), BX
- MOVL val+4(FP), AX
- XCHGL AX, 0(BX)
- RET
-
-TEXT runtime·atomicstore(SB), NOSPLIT, $0-8
- MOVL ptr+0(FP), BX
- MOVL val+4(FP), AX
- XCHGL AX, 0(BX)
- RET
-
-// uint64 atomicload64(uint64 volatile* addr);
-TEXT runtime·atomicload64(SB), NOSPLIT, $0-12
- MOVL ptr+0(FP), AX
- TESTL $7, AX
- JZ 2(PC)
- MOVL 0, AX // crash with nil ptr deref
- LEAL ret_lo+4(FP), BX
- // MOVQ (%EAX), %MM0
- BYTE $0x0f; BYTE $0x6f; BYTE $0x00
- // MOVQ %MM0, 0(%EBX)
- BYTE $0x0f; BYTE $0x7f; BYTE $0x03
- // EMMS
- BYTE $0x0F; BYTE $0x77
- RET
-
-// void runtime·atomicstore64(uint64 volatile* addr, uint64 v);
-TEXT runtime·atomicstore64(SB), NOSPLIT, $0-12
- MOVL ptr+0(FP), AX
- TESTL $7, AX
- JZ 2(PC)
- MOVL 0, AX // crash with nil ptr deref
- // MOVQ and EMMS were introduced on the Pentium MMX.
- // MOVQ 0x8(%ESP), %MM0
- BYTE $0x0f; BYTE $0x6f; BYTE $0x44; BYTE $0x24; BYTE $0x08
- // MOVQ %MM0, (%EAX)
- BYTE $0x0f; BYTE $0x7f; BYTE $0x00
- // EMMS
- BYTE $0x0F; BYTE $0x77
- // This is essentially a no-op, but it provides required memory fencing.
- // It can be replaced with MFENCE, but MFENCE was introduced only on the Pentium4 (SSE2).
- MOVL $0, AX
- LOCK
- XADDL AX, (SP)
- RET
-
-// void runtime·atomicor8(byte volatile*, byte);
-TEXT runtime·atomicor8(SB), NOSPLIT, $0-5
- MOVL ptr+0(FP), AX
- MOVB val+4(FP), BX
- LOCK
- ORB BX, (AX)
- RET
-
-// void runtime·atomicand8(byte volatile*, byte);
-TEXT runtime·atomicand8(SB), NOSPLIT, $0-5
- MOVL ptr+0(FP), AX
- MOVB val+4(FP), BX
- LOCK
- ANDB BX, (AX)
- RET
-
TEXT ·publicationBarrier(SB),NOSPLIT,$0-0
// Stores are already ordered on x86, so this is just a
// compile barrier.
CALLFN(·call536870912, 536870912)
CALLFN(·call1073741824, 1073741824)
-// bool cas(int32 *val, int32 old, int32 new)
-// Atomically:
-// if(*val == old){
-// *val = new;
-// return 1;
-// } else
-// return 0;
-TEXT runtime·cas(SB), NOSPLIT, $0-17
- MOVQ ptr+0(FP), BX
- MOVL old+8(FP), AX
- MOVL new+12(FP), CX
- LOCK
- CMPXCHGL CX, 0(BX)
- SETEQ ret+16(FP)
- RET
-
-// bool runtime·cas64(uint64 *val, uint64 old, uint64 new)
-// Atomically:
-// if(*val == *old){
-// *val = new;
-// return 1;
-// } else {
-// return 0;
-// }
-TEXT runtime·cas64(SB), NOSPLIT, $0-25
- MOVQ ptr+0(FP), BX
- MOVQ old+8(FP), AX
- MOVQ new+16(FP), CX
- LOCK
- CMPXCHGQ CX, 0(BX)
- SETEQ ret+24(FP)
- RET
-
-TEXT runtime·casuintptr(SB), NOSPLIT, $0-25
- JMP runtime·cas64(SB)
-
-TEXT runtime·atomicloaduintptr(SB), NOSPLIT, $0-16
- JMP runtime·atomicload64(SB)
-
-TEXT runtime·atomicloaduint(SB), NOSPLIT, $0-16
- JMP runtime·atomicload64(SB)
-
-TEXT runtime·atomicstoreuintptr(SB), NOSPLIT, $0-16
- JMP runtime·atomicstore64(SB)
-
-// bool casp(void **val, void *old, void *new)
-// Atomically:
-// if(*val == old){
-// *val = new;
-// return 1;
-// } else
-// return 0;
-TEXT runtime·casp1(SB), NOSPLIT, $0-25
- MOVQ ptr+0(FP), BX
- MOVQ old+8(FP), AX
- MOVQ new+16(FP), CX
- LOCK
- CMPXCHGQ CX, 0(BX)
- SETEQ ret+24(FP)
- RET
-
-// uint32 xadd(uint32 volatile *val, int32 delta)
-// Atomically:
-// *val += delta;
-// return *val;
-TEXT runtime·xadd(SB), NOSPLIT, $0-20
- MOVQ ptr+0(FP), BX
- MOVL delta+8(FP), AX
- MOVL AX, CX
- LOCK
- XADDL AX, 0(BX)
- ADDL CX, AX
- MOVL AX, ret+16(FP)
- RET
-
-TEXT runtime·xadd64(SB), NOSPLIT, $0-24
- MOVQ ptr+0(FP), BX
- MOVQ delta+8(FP), AX
- MOVQ AX, CX
- LOCK
- XADDQ AX, 0(BX)
- ADDQ CX, AX
- MOVQ AX, ret+16(FP)
- RET
-
-TEXT runtime·xadduintptr(SB), NOSPLIT, $0-24
- JMP runtime·xadd64(SB)
-
-TEXT runtime·xchg(SB), NOSPLIT, $0-20
- MOVQ ptr+0(FP), BX
- MOVL new+8(FP), AX
- XCHGL AX, 0(BX)
- MOVL AX, ret+16(FP)
- RET
-
-TEXT runtime·xchg64(SB), NOSPLIT, $0-24
- MOVQ ptr+0(FP), BX
- MOVQ new+8(FP), AX
- XCHGQ AX, 0(BX)
- MOVQ AX, ret+16(FP)
- RET
-
-TEXT runtime·xchguintptr(SB), NOSPLIT, $0-24
- JMP runtime·xchg64(SB)
-
TEXT runtime·procyield(SB),NOSPLIT,$0-0
MOVL cycles+0(FP), AX
again:
JNZ again
RET
-TEXT runtime·atomicstorep1(SB), NOSPLIT, $0-16
- MOVQ ptr+0(FP), BX
- MOVQ val+8(FP), AX
- XCHGQ AX, 0(BX)
- RET
-
-TEXT runtime·atomicstore(SB), NOSPLIT, $0-12
- MOVQ ptr+0(FP), BX
- MOVL val+8(FP), AX
- XCHGL AX, 0(BX)
- RET
-
-TEXT runtime·atomicstore64(SB), NOSPLIT, $0-16
- MOVQ ptr+0(FP), BX
- MOVQ val+8(FP), AX
- XCHGQ AX, 0(BX)
- RET
-
-// void runtime·atomicor8(byte volatile*, byte);
-TEXT runtime·atomicor8(SB), NOSPLIT, $0-9
- MOVQ ptr+0(FP), AX
- MOVB val+8(FP), BX
- LOCK
- ORB BX, (AX)
- RET
-
-// void runtime·atomicand8(byte volatile*, byte);
-TEXT runtime·atomicand8(SB), NOSPLIT, $0-9
- MOVQ ptr+0(FP), AX
- MOVB val+8(FP), BX
- LOCK
- ANDB BX, (AX)
- RET
TEXT ·publicationBarrier(SB),NOSPLIT,$0-0
// Stores are already ordered on x86, so this is just a
CALLFN(·call536870912, 536870912)
CALLFN(·call1073741824, 1073741824)
-// bool cas(int32 *val, int32 old, int32 new)
-// Atomically:
-// if(*val == old){
-// *val = new;
-// return 1;
-// } else
-// return 0;
-TEXT runtime·cas(SB), NOSPLIT, $0-17
- MOVL ptr+0(FP), BX
- MOVL old+4(FP), AX
- MOVL new+8(FP), CX
- LOCK
- CMPXCHGL CX, 0(BX)
- SETEQ ret+16(FP)
- RET
-
-TEXT runtime·casuintptr(SB), NOSPLIT, $0-17
- JMP runtime·cas(SB)
-
-TEXT runtime·atomicloaduintptr(SB), NOSPLIT, $0-12
- JMP runtime·atomicload(SB)
-
-TEXT runtime·atomicloaduint(SB), NOSPLIT, $0-12
- JMP runtime·atomicload(SB)
-
-TEXT runtime·atomicstoreuintptr(SB), NOSPLIT, $0-12
- JMP runtime·atomicstore(SB)
-
-// bool runtime·cas64(uint64 *val, uint64 old, uint64 new)
-// Atomically:
-// if(*val == *old){
-// *val = new;
-// return 1;
-// } else {
-// return 0;
-// }
-TEXT runtime·cas64(SB), NOSPLIT, $0-25
- MOVL ptr+0(FP), BX
- MOVQ old+8(FP), AX
- MOVQ new+16(FP), CX
- LOCK
- CMPXCHGQ CX, 0(BX)
- SETEQ ret+24(FP)
- RET
-
-// bool casp(void **val, void *old, void *new)
-// Atomically:
-// if(*val == old){
-// *val = new;
-// return 1;
-// } else
-// return 0;
-TEXT runtime·casp1(SB), NOSPLIT, $0-17
- MOVL ptr+0(FP), BX
- MOVL old+4(FP), AX
- MOVL new+8(FP), CX
- LOCK
- CMPXCHGL CX, 0(BX)
- SETEQ ret+16(FP)
- RET
-
-// uint32 xadd(uint32 volatile *val, int32 delta)
-// Atomically:
-// *val += delta;
-// return *val;
-TEXT runtime·xadd(SB), NOSPLIT, $0-12
- MOVL ptr+0(FP), BX
- MOVL delta+4(FP), AX
- MOVL AX, CX
- LOCK
- XADDL AX, 0(BX)
- ADDL CX, AX
- MOVL AX, ret+8(FP)
- RET
-
-TEXT runtime·xadd64(SB), NOSPLIT, $0-24
- MOVL ptr+0(FP), BX
- MOVQ delta+8(FP), AX
- MOVQ AX, CX
- LOCK
- XADDQ AX, 0(BX)
- ADDQ CX, AX
- MOVQ AX, ret+16(FP)
- RET
-
-TEXT runtime·xadduintptr(SB), NOSPLIT, $0-12
- JMP runtime·xadd(SB)
-
-TEXT runtime·xchg(SB), NOSPLIT, $0-12
- MOVL ptr+0(FP), BX
- MOVL new+4(FP), AX
- XCHGL AX, 0(BX)
- MOVL AX, ret+8(FP)
- RET
-
-TEXT runtime·xchg64(SB), NOSPLIT, $0-24
- MOVL ptr+0(FP), BX
- MOVQ new+8(FP), AX
- XCHGQ AX, 0(BX)
- MOVQ AX, ret+16(FP)
- RET
-
-TEXT runtime·xchguintptr(SB), NOSPLIT, $0-12
- JMP runtime·xchg(SB)
-
TEXT runtime·procyield(SB),NOSPLIT,$0-0
MOVL cycles+0(FP), AX
again:
JNZ again
RET
-TEXT runtime·atomicstorep1(SB), NOSPLIT, $0-8
- MOVL ptr+0(FP), BX
- MOVL val+4(FP), AX
- XCHGL AX, 0(BX)
- RET
-
-TEXT runtime·atomicstore(SB), NOSPLIT, $0-8
- MOVL ptr+0(FP), BX
- MOVL val+4(FP), AX
- XCHGL AX, 0(BX)
- RET
-
-TEXT runtime·atomicstore64(SB), NOSPLIT, $0-16
- MOVL ptr+0(FP), BX
- MOVQ val+8(FP), AX
- XCHGQ AX, 0(BX)
- RET
-
-// void runtime·atomicor8(byte volatile*, byte);
-TEXT runtime·atomicor8(SB), NOSPLIT, $0-5
- MOVL ptr+0(FP), BX
- MOVB val+4(FP), AX
- LOCK
- ORB AX, 0(BX)
- RET
-
-// void runtime·atomicand8(byte volatile*, byte);
-TEXT runtime·atomicand8(SB), NOSPLIT, $0-5
- MOVL ptr+0(FP), BX
- MOVB val+4(FP), AX
- LOCK
- ANDB AX, 0(BX)
- RET
-
TEXT ·publicationBarrier(SB),NOSPLIT,$0-0
// Stores are already ordered on x86, so this is just a
// compile barrier.
MOVW $0, R0
MOVW (R0), R1
-// bool armcas(int32 *val, int32 old, int32 new)
-// Atomically:
-// if(*val == old){
-// *val = new;
-// return 1;
-// }else
-// return 0;
-//
-// To implement runtime·cas in sys_$GOOS_arm.s
-// using the native instructions, use:
-//
-// TEXT runtime·cas(SB),NOSPLIT,$0
-// B runtime·armcas(SB)
-//
-TEXT runtime·armcas(SB),NOSPLIT,$0-13
- MOVW valptr+0(FP), R1
- MOVW old+4(FP), R2
- MOVW new+8(FP), R3
-casl:
- LDREX (R1), R0
- CMP R0, R2
- BNE casfail
-
- MOVB runtime·goarm(SB), R11
- CMP $7, R11
- BLT 2(PC)
- WORD $0xf57ff05a // dmb ishst
-
- STREX R3, (R1), R0
- CMP $0, R0
- BNE casl
- MOVW $1, R0
-
- MOVB runtime·goarm(SB), R11
- CMP $7, R11
- BLT 2(PC)
- WORD $0xf57ff05b // dmb ish
-
- MOVB R0, ret+12(FP)
- RET
-casfail:
- MOVW $0, R0
- MOVB R0, ret+12(FP)
- RET
-
-TEXT runtime·casuintptr(SB),NOSPLIT,$0-13
- B runtime·cas(SB)
-
-TEXT runtime·atomicloaduintptr(SB),NOSPLIT,$0-8
- B runtime·atomicload(SB)
-
-TEXT runtime·atomicloaduint(SB),NOSPLIT,$0-8
- B runtime·atomicload(SB)
-
-TEXT runtime·atomicstoreuintptr(SB),NOSPLIT,$0-8
- B runtime·atomicstore(SB)
-
// armPublicationBarrier is a native store/store barrier for ARMv7+.
// On earlier ARM revisions, armPublicationBarrier is a no-op.
// This will not work on SMP ARMv6 machines, if any are in use.
CALLFN(·call536870912, 536870920 )
CALLFN(·call1073741824, 1073741832 )
-// bool cas(uint32 *ptr, uint32 old, uint32 new)
-// Atomically:
-// if(*val == old){
-// *val = new;
-// return 1;
-// } else
-// return 0;
-TEXT runtime·cas(SB), NOSPLIT, $0-17
- MOVD ptr+0(FP), R0
- MOVW old+8(FP), R1
- MOVW new+12(FP), R2
-again:
- LDAXRW (R0), R3
- CMPW R1, R3
- BNE ok
- STLXRW R2, (R0), R3
- CBNZ R3, again
-ok:
- CSET EQ, R0
- MOVB R0, ret+16(FP)
- RET
-
-TEXT runtime·casuintptr(SB), NOSPLIT, $0-25
- B runtime·cas64(SB)
-
-TEXT runtime·atomicloaduintptr(SB), NOSPLIT, $-8-16
- B runtime·atomicload64(SB)
-
-TEXT runtime·atomicloaduint(SB), NOSPLIT, $-8-16
- B runtime·atomicload64(SB)
-
-TEXT runtime·atomicstoreuintptr(SB), NOSPLIT, $0-16
- B runtime·atomicstore64(SB)
-
// AES hashing not implemented for ARM64, issue #10109.
TEXT runtime·aeshash(SB),NOSPLIT,$-8-0
MOVW $0, R0
TEXT runtime·aeshashstr(SB),NOSPLIT,$-8-0
MOVW $0, R0
MOVW (R0), R1
-
-// bool casp(void **val, void *old, void *new)
-// Atomically:
-// if(*val == old){
-// *val = new;
-// return 1;
-// } else
-// return 0;
-TEXT runtime·casp1(SB), NOSPLIT, $0-25
- B runtime·cas64(SB)
-
+
TEXT runtime·procyield(SB),NOSPLIT,$0-0
MOVWU cycles+0(FP), R0
again:
CALLFN(·call536870912, 536870912)
CALLFN(·call1073741824, 1073741824)
-// bool cas(uint32 *ptr, uint32 old, uint32 new)
-// Atomically:
-// if(*val == old){
-// *val = new;
-// return 1;
-// } else
-// return 0;
-TEXT runtime·cas(SB), NOSPLIT, $0-17
- MOVD ptr+0(FP), R3
- MOVWZ old+8(FP), R4
- MOVWZ new+12(FP), R5
-cas_again:
- SYNC
- LWAR (R3), R6
- CMPW R6, R4
- BNE cas_fail
- STWCCC R5, (R3)
- BNE cas_again
- MOVD $1, R3
- SYNC
- ISYNC
- MOVB R3, ret+16(FP)
- RET
-cas_fail:
- MOVD $0, R3
- BR -5(PC)
-
-// bool runtime·cas64(uint64 *ptr, uint64 old, uint64 new)
-// Atomically:
-// if(*val == *old){
-// *val = new;
-// return 1;
-// } else {
-// return 0;
-// }
-TEXT runtime·cas64(SB), NOSPLIT, $0-25
- MOVD ptr+0(FP), R3
- MOVD old+8(FP), R4
- MOVD new+16(FP), R5
-cas64_again:
- SYNC
- LDAR (R3), R6
- CMP R6, R4
- BNE cas64_fail
- STDCCC R5, (R3)
- BNE cas64_again
- MOVD $1, R3
- SYNC
- ISYNC
- MOVB R3, ret+24(FP)
- RET
-cas64_fail:
- MOVD $0, R3
- BR -5(PC)
-
-TEXT runtime·casuintptr(SB), NOSPLIT, $0-25
- BR runtime·cas64(SB)
-
-TEXT runtime·atomicloaduintptr(SB), NOSPLIT|NOFRAME, $0-16
- BR runtime·atomicload64(SB)
-
-TEXT runtime·atomicloaduint(SB), NOSPLIT|NOFRAME, $0-16
- BR runtime·atomicload64(SB)
-
-TEXT runtime·atomicstoreuintptr(SB), NOSPLIT, $0-16
- BR runtime·atomicstore64(SB)
-
-// bool casp(void **val, void *old, void *new)
-// Atomically:
-// if(*val == old){
-// *val = new;
-// return 1;
-// } else
-// return 0;
-TEXT runtime·casp1(SB), NOSPLIT, $0-25
- BR runtime·cas64(SB)
-
-// uint32 xadd(uint32 volatile *ptr, int32 delta)
-// Atomically:
-// *val += delta;
-// return *val;
-TEXT runtime·xadd(SB), NOSPLIT, $0-20
- MOVD ptr+0(FP), R4
- MOVW delta+8(FP), R5
- SYNC
- LWAR (R4), R3
- ADD R5, R3
- STWCCC R3, (R4)
- BNE -4(PC)
- SYNC
- ISYNC
- MOVW R3, ret+16(FP)
- RET
-
-TEXT runtime·xadd64(SB), NOSPLIT, $0-24
- MOVD ptr+0(FP), R4
- MOVD delta+8(FP), R5
- SYNC
- LDAR (R4), R3
- ADD R5, R3
- STDCCC R3, (R4)
- BNE -4(PC)
- SYNC
- ISYNC
- MOVD R3, ret+16(FP)
- RET
-
-TEXT runtime·xchg(SB), NOSPLIT, $0-20
- MOVD ptr+0(FP), R4
- MOVW new+8(FP), R5
- SYNC
- LWAR (R4), R3
- STWCCC R5, (R4)
- BNE -3(PC)
- SYNC
- ISYNC
- MOVW R3, ret+16(FP)
- RET
-
-TEXT runtime·xchg64(SB), NOSPLIT, $0-24
- MOVD ptr+0(FP), R4
- MOVD new+8(FP), R5
- SYNC
- LDAR (R4), R3
- STDCCC R5, (R4)
- BNE -3(PC)
- SYNC
- ISYNC
- MOVD R3, ret+16(FP)
- RET
-
-TEXT runtime·xchguintptr(SB), NOSPLIT, $0-24
- BR runtime·xchg64(SB)
-
TEXT runtime·procyield(SB),NOSPLIT,$0-0
RET
-TEXT runtime·atomicstorep1(SB), NOSPLIT, $0-16
- BR runtime·atomicstore64(SB)
-
-TEXT runtime·atomicstore(SB), NOSPLIT, $0-12
- MOVD ptr+0(FP), R3
- MOVW val+8(FP), R4
- SYNC
- MOVW R4, 0(R3)
- RET
-
-TEXT runtime·atomicstore64(SB), NOSPLIT, $0-16
- MOVD ptr+0(FP), R3
- MOVD val+8(FP), R4
- SYNC
- MOVD R4, 0(R3)
- RET
-
-// void runtime·atomicor8(byte volatile*, byte);
-TEXT runtime·atomicor8(SB), NOSPLIT, $0-9
- MOVD ptr+0(FP), R3
- MOVBZ val+8(FP), R4
- // Align ptr down to 4 bytes so we can use 32-bit load/store.
- // R5 = (R3 << 0) & ~3
- RLDCR $0, R3, $~3, R5
- // Compute val shift.
-#ifdef GOARCH_ppc64
- // Big endian. ptr = ptr ^ 3
- XOR $3, R3
-#endif
- // R6 = ((ptr & 3) * 8) = (ptr << 3) & (3*8)
- RLDC $3, R3, $(3*8), R6
- // Shift val for aligned ptr. R4 = val << R6
- SLD R6, R4, R4
-
-again:
- SYNC
- LWAR (R5), R6
- OR R4, R6
- STWCCC R6, (R5)
- BNE again
- SYNC
- ISYNC
- RET
-
-// void runtime·atomicand8(byte volatile*, byte);
-TEXT runtime·atomicand8(SB), NOSPLIT, $0-9
- MOVD ptr+0(FP), R3
- MOVBZ val+8(FP), R4
- // Align ptr down to 4 bytes so we can use 32-bit load/store.
- // R5 = (R3 << 0) & ~3
- RLDCR $0, R3, $~3, R5
- // Compute val shift.
-#ifdef GOARCH_ppc64
- // Big endian. ptr = ptr ^ 3
- XOR $3, R3
-#endif
- // R6 = ((ptr & 3) * 8) = (ptr << 3) & (3*8)
- RLDC $3, R3, $(3*8), R6
- // Shift val for aligned ptr. R4 = val << R6 | ^(0xFF << R6)
- MOVD $0xFF, R7
- SLD R6, R4
- SLD R6, R7
- XOR $-1, R7
- OR R7, R4
-again:
- SYNC
- LWAR (R5), R6
- AND R4, R6
- STWCCC R6, (R5)
- BNE again
- SYNC
- ISYNC
- RET
-
// void jmpdefer(fv, sp);
// called from deferreturn.
// 1. grab stored LR for caller
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import "unsafe"
-
-var locktab [57]struct {
- l mutex
- pad [_CacheLineSize - unsafe.Sizeof(mutex{})]byte
-}
-
-func addrLock(addr *uint64) *mutex {
- return &locktab[(uintptr(unsafe.Pointer(addr))>>3)%uintptr(len(locktab))].l
-}
-
-// Atomic add and return new value.
-//go:nosplit
-func xadd(val *uint32, delta int32) uint32 {
- for {
- oval := *val
- nval := oval + uint32(delta)
- if cas(val, oval, nval) {
- return nval
- }
- }
-}
-
-//go:noescape
-//go:linkname xadduintptr runtime.xadd
-func xadduintptr(ptr *uintptr, delta uintptr) uintptr
-
-//go:nosplit
-func xchg(addr *uint32, v uint32) uint32 {
- for {
- old := *addr
- if cas(addr, old, v) {
- return old
- }
- }
-}
-
-//go:nosplit
-func xchguintptr(addr *uintptr, v uintptr) uintptr {
- return uintptr(xchg((*uint32)(unsafe.Pointer(addr)), uint32(v)))
-}
-
-//go:nosplit
-func atomicload(addr *uint32) uint32 {
- return xadd(addr, 0)
-}
-
-//go:nosplit
-func atomicloadp(addr unsafe.Pointer) unsafe.Pointer {
- return unsafe.Pointer(uintptr(xadd((*uint32)(addr), 0)))
-}
-
-//go:nosplit
-func atomicstorep1(addr unsafe.Pointer, v unsafe.Pointer) {
- for {
- old := *(*unsafe.Pointer)(addr)
- if casp1((*unsafe.Pointer)(addr), old, v) {
- return
- }
- }
-}
-
-//go:nosplit
-func atomicstore(addr *uint32, v uint32) {
- for {
- old := *addr
- if cas(addr, old, v) {
- return
- }
- }
-}
-
-//go:nosplit
-func cas64(addr *uint64, old, new uint64) bool {
- var ok bool
- systemstack(func() {
- lock(addrLock(addr))
- if *addr == old {
- *addr = new
- ok = true
- }
- unlock(addrLock(addr))
- })
- return ok
-}
-
-//go:nosplit
-func xadd64(addr *uint64, delta int64) uint64 {
- var r uint64
- systemstack(func() {
- lock(addrLock(addr))
- r = *addr + uint64(delta)
- *addr = r
- unlock(addrLock(addr))
- })
- return r
-}
-
-//go:nosplit
-func xchg64(addr *uint64, v uint64) uint64 {
- var r uint64
- systemstack(func() {
- lock(addrLock(addr))
- r = *addr
- *addr = v
- unlock(addrLock(addr))
- })
- return r
-}
-
-//go:nosplit
-func atomicload64(addr *uint64) uint64 {
- var r uint64
- systemstack(func() {
- lock(addrLock(addr))
- r = *addr
- unlock(addrLock(addr))
- })
- return r
-}
-
-//go:nosplit
-func atomicstore64(addr *uint64, v uint64) {
- systemstack(func() {
- lock(addrLock(addr))
- *addr = v
- unlock(addrLock(addr))
- })
-}
-
-//go:nosplit
-func atomicor8(addr *uint8, v uint8) {
- // Align down to 4 bytes and use 32-bit CAS.
- uaddr := uintptr(unsafe.Pointer(addr))
- addr32 := (*uint32)(unsafe.Pointer(uaddr &^ 3))
- word := uint32(v) << ((uaddr & 3) * 8) // little endian
- for {
- old := *addr32
- if cas(addr32, old, old|word) {
- return
- }
- }
-}
-
-//go:nosplit
-func atomicand8(addr *uint8, v uint8) {
- // Align down to 4 bytes and use 32-bit CAS.
- uaddr := uintptr(unsafe.Pointer(addr))
- addr32 := (*uint32)(unsafe.Pointer(uaddr &^ 3))
- word := uint32(v) << ((uaddr & 3) * 8) // little endian
- mask := uint32(0xFF) << ((uaddr & 3) * 8) // little endian
- word |= ^mask
- for {
- old := *addr32
- if cas(addr32, old, old&word) {
- return
- }
- }
-}
#include "textflag.h"
-// uint32 runtime·atomicload(uint32 volatile* addr)
-TEXT ·atomicload(SB),NOSPLIT,$-8-12
- MOVD ptr+0(FP), R0
- LDARW (R0), R0
- MOVW R0, ret+8(FP)
- RET
-
-// uint64 runtime·atomicload64(uint64 volatile* addr)
-TEXT ·atomicload64(SB),NOSPLIT,$-8-16
- MOVD ptr+0(FP), R0
- LDAR (R0), R0
- MOVD R0, ret+8(FP)
- RET
-
-// void *runtime·atomicloadp(void *volatile *addr)
-TEXT ·atomicloadp(SB),NOSPLIT,$-8-16
- MOVD ptr+0(FP), R0
- LDAR (R0), R0
- MOVD R0, ret+8(FP)
- RET
-
-TEXT runtime·atomicstorep1(SB), NOSPLIT, $0-16
- B runtime·atomicstore64(SB)
-
-TEXT runtime·atomicstore(SB), NOSPLIT, $0-12
- MOVD ptr+0(FP), R0
- MOVW val+8(FP), R1
- STLRW R1, (R0)
- RET
-
-TEXT runtime·atomicstore64(SB), NOSPLIT, $0-16
- MOVD ptr+0(FP), R0
- MOVD val+8(FP), R1
- STLR R1, (R0)
- RET
-
-TEXT runtime·xchg(SB), NOSPLIT, $0-20
-again:
- MOVD ptr+0(FP), R0
- MOVW new+8(FP), R1
- LDAXRW (R0), R2
- STLXRW R1, (R0), R3
- CBNZ R3, again
- MOVW R2, ret+16(FP)
- RET
-
-TEXT runtime·xchg64(SB), NOSPLIT, $0-24
-again:
- MOVD ptr+0(FP), R0
- MOVD new+8(FP), R1
- LDAXR (R0), R2
- STLXR R1, (R0), R3
- CBNZ R3, again
- MOVD R2, ret+16(FP)
- RET
-
-// bool runtime·cas64(uint64 *ptr, uint64 old, uint64 new)
-// Atomically:
-// if(*val == *old){
-// *val = new;
-// return 1;
-// } else {
-// return 0;
-// }
-TEXT runtime·cas64(SB), NOSPLIT, $0-25
- MOVD ptr+0(FP), R0
- MOVD old+8(FP), R1
- MOVD new+16(FP), R2
-again:
- LDAXR (R0), R3
- CMP R1, R3
- BNE ok
- STLXR R2, (R0), R3
- CBNZ R3, again
-ok:
- CSET EQ, R0
- MOVB R0, ret+24(FP)
- RET
-
-// uint32 xadd(uint32 volatile *ptr, int32 delta)
-// Atomically:
-// *val += delta;
-// return *val;
-TEXT runtime·xadd(SB), NOSPLIT, $0-20
-again:
- MOVD ptr+0(FP), R0
- MOVW delta+8(FP), R1
- LDAXRW (R0), R2
- ADDW R2, R1, R2
- STLXRW R2, (R0), R3
- CBNZ R3, again
- MOVW R2, ret+16(FP)
- RET
-
-TEXT runtime·xadd64(SB), NOSPLIT, $0-24
-again:
- MOVD ptr+0(FP), R0
- MOVD delta+8(FP), R1
- LDAXR (R0), R2
- ADD R2, R1, R2
- STLXR R2, (R0), R3
- CBNZ R3, again
- MOVD R2, ret+16(FP)
- RET
-
-TEXT runtime·xchguintptr(SB), NOSPLIT, $0-24
- B runtime·xchg64(SB)
-
TEXT ·publicationBarrier(SB),NOSPLIT,$-8-0
DMB $0xe // DMB ST
RET
package runtime
-import "unsafe"
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
// These functions cannot have go:noescape annotations,
// because while ptr does not escape, new does.
//go:nosplit
func atomicstorep(ptr unsafe.Pointer, new unsafe.Pointer) {
- atomicstorep1(noescape(ptr), new)
+ atomic.Storep1(noescape(ptr), new)
writebarrierptr_nostore((*uintptr)(ptr), uintptr(new))
}
//go:nosplit
func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool {
- if !casp1((*unsafe.Pointer)(noescape(unsafe.Pointer(ptr))), noescape(old), new) {
+ if !atomic.Casp1((*unsafe.Pointer)(noescape(unsafe.Pointer(ptr))), noescape(old), new) {
return false
}
writebarrierptr_nostore((*uintptr)(unsafe.Pointer(ptr)), uintptr(new))
//go:nosplit
func sync_atomic_StorePointer(ptr *unsafe.Pointer, new unsafe.Pointer) {
sync_atomic_StoreUintptr((*uintptr)(unsafe.Pointer(ptr)), uintptr(new))
- atomicstorep1(noescape(unsafe.Pointer(ptr)), new)
+ atomic.Storep1(noescape(unsafe.Pointer(ptr)), new)
writebarrierptr_nostore((*uintptr)(unsafe.Pointer(ptr)), uintptr(new))
}
+++ /dev/null
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ppc64 ppc64le
-
-package runtime
-
-import "unsafe"
-
-//go:noescape
-func xadd(ptr *uint32, delta int32) uint32
-
-//go:noescape
-func xadd64(ptr *uint64, delta int64) uint64
-
-//go:noescape
-//go:linkname xadduintptr runtime.xadd64
-func xadduintptr(ptr *uintptr, delta uintptr) uintptr
-
-//go:noescape
-func xchg(ptr *uint32, new uint32) uint32
-
-//go:noescape
-func xchg64(ptr *uint64, new uint64) uint64
-
-//go:noescape
-func xchguintptr(ptr *uintptr, new uintptr) uintptr
-
-//go:noescape
-func atomicload(ptr *uint32) uint32
-
-//go:noescape
-func atomicload64(ptr *uint64) uint64
-
-//go:noescape
-func atomicloadp(ptr unsafe.Pointer) unsafe.Pointer
-
-//go:noescape
-func atomicand8(ptr *uint8, val uint8)
-
-//go:noescape
-func atomicor8(ptr *uint8, val uint8)
-
-// NOTE: Do not add atomicxor8 (XOR is not idempotent).
-
-//go:noescape
-func cas64(ptr *uint64, old, new uint64) bool
-
-//go:noescape
-func atomicstore(ptr *uint32, val uint32)
-
-//go:noescape
-func atomicstore64(ptr *uint64, val uint64)
-
-// NO go:noescape annotation; see atomic_pointer.go.
-func atomicstorep1(ptr unsafe.Pointer, val unsafe.Pointer)
#include "textflag.h"
-// uint32 runtime·atomicload(uint32 volatile* addr)
-TEXT ·atomicload(SB),NOSPLIT|NOFRAME,$0-12
- MOVD addr+0(FP), R3
- SYNC
- MOVWZ 0(R3), R3
- CMPW R3, R3, CR7
- BC 4, 30, 1(PC) // bne- cr7,0x4
- ISYNC
- MOVW R3, ret+8(FP)
- RET
-
-// uint64 runtime·atomicload64(uint64 volatile* addr)
-TEXT ·atomicload64(SB),NOSPLIT|NOFRAME,$0-16
- MOVD addr+0(FP), R3
- SYNC
- MOVD 0(R3), R3
- CMP R3, R3, CR7
- BC 4, 30, 1(PC) // bne- cr7,0x4
- ISYNC
- MOVD R3, ret+8(FP)
- RET
-
-// void *runtime·atomicloadp(void *volatile *addr)
-TEXT ·atomicloadp(SB),NOSPLIT|NOFRAME,$0-16
- MOVD addr+0(FP), R3
- SYNC
- MOVD 0(R3), R3
- CMP R3, R3, CR7
- BC 4, 30, 1(PC) // bne- cr7,0x4
- ISYNC
- MOVD R3, ret+8(FP)
- RET
-
TEXT ·publicationBarrier(SB),NOSPLIT|NOFRAME,$0-0
// LWSYNC is the "export" barrier recommended by Power ISA
// v2.07 book II, appendix B.2.2.2.
// For buffered channels, also:
// c.qcount > 0 implies that c.recvq is empty.
// c.qcount < c.dataqsiz implies that c.sendq is empty.
-import "unsafe"
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
const (
maxAlign = 8
// The order of operations is important here: reversing the operations can lead to
// incorrect behavior when racing with a close.
if !block && (c.dataqsiz == 0 && c.sendq.first == nil ||
- c.dataqsiz > 0 && atomicloaduint(&c.qcount) == 0) &&
- atomicload(&c.closed) == 0 {
+ c.dataqsiz > 0 && atomic.Loaduint(&c.qcount) == 0) &&
+ atomic.Load(&c.closed) == 0 {
return
}
// if sgp participates in a select and is already signaled, ignore it
if sgp.selectdone != nil {
// claim the right to signal
- if *sgp.selectdone != 0 || !cas(sgp.selectdone, 0, 1) {
+ if *sgp.selectdone != 0 || !atomic.Cas(sgp.selectdone, 0, 1) {
continue
}
}
package runtime
-import "unsafe"
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
const (
numBuckets = 1 << 10
if n&0x80000000 != 0 {
print("runtime: setcpuprofile(off) twice\n")
}
- if cas(&cpuprof.handoff, n, n|0x80000000) {
+ if atomic.Cas(&cpuprof.handoff, n, n|0x80000000) {
if n == 0 {
// we did the transition from 0 -> nonzero so we wake getprofile
notewakeup(&cpuprof.wait)
// so it cannot allocate memory or block. It can try to swap logs with
// the writing goroutine, as explained in the comment at the top of this file.
func (p *cpuProfile) flushlog() bool {
- if !cas(&p.handoff, 0, uint32(p.nlog)) {
+ if !atomic.Cas(&p.handoff, 0, uint32(p.nlog)) {
return false
}
notewakeup(&p.wait)
p.flushing = true
goto Flush
}
- if cas(&p.handoff, n, 0) {
+ if atomic.Cas(&p.handoff, n, 0) {
break
}
}
// Finally done. Clean up and return nil.
p.flushing = false
- if !cas(&p.handoff, p.handoff, 0) {
+ if !atomic.Cas(&p.handoff, p.handoff, 0) {
print("runtime: profile flush racing with something\n")
}
return nil
package runtime
-import "unsafe"
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
// GOMAXPROCS sets the maximum number of CPUs that can be executing
// simultaneously and returns the previous setting. If n < 1, it does not
// NumCgoCall returns the number of cgo calls made by the current process.
func NumCgoCall() int64 {
var n int64
- for mp := (*m)(atomicloadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink {
+ for mp := (*m)(atomic.Loadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink {
n += int64(mp.ncgocall)
}
return n
package runtime
-import "unsafe"
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
var Fadd64 = fadd64
var Fsub64 = fsub64
var Entersyscall = entersyscall
var Exitsyscall = exitsyscall
var LockedOSThread = lockedOSThread
-var Xadduintptr = xadduintptr
+var Xadduintptr = atomic.Xadduintptr
var FuncPC = funcPC
// before the table grows. Typical tables will be somewhat less loaded.
import (
+ "runtime/internal/atomic"
"unsafe"
)
msanread(key, t.key.size)
}
if h == nil || h.count == 0 {
- return atomicloadp(unsafe.Pointer(&zeroptr))
+ return atomic.Loadp(unsafe.Pointer(&zeroptr))
}
alg := t.key.alg
hash := alg.hash(key, uintptr(h.hash0))
}
b = b.overflow(t)
if b == nil {
- return atomicloadp(unsafe.Pointer(&zeroptr))
+ return atomic.Loadp(unsafe.Pointer(&zeroptr))
}
}
}
msanread(key, t.key.size)
}
if h == nil || h.count == 0 {
- return atomicloadp(unsafe.Pointer(&zeroptr)), false
+ return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
}
alg := t.key.alg
hash := alg.hash(key, uintptr(h.hash0))
}
b = b.overflow(t)
if b == nil {
- return atomicloadp(unsafe.Pointer(&zeroptr)), false
+ return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
}
}
}
// Remember we have an iterator.
// Can run concurrently with another hash_iter_init().
if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
- atomicor8(&h.flags, iterator|oldIterator)
+ atomic.Or8(&h.flags, iterator|oldIterator)
}
mapiternext(it)
// serve as the zero value for t.
func mapzero(t *_type) {
// Is the type small enough for existing buffer?
- cursize := uintptr(atomicloadp(unsafe.Pointer(&zerosize)))
+ cursize := uintptr(atomic.Loadp(unsafe.Pointer(&zerosize)))
if t.size <= cursize {
return
}
// Allocate a new buffer.
lock(&zerolock)
- cursize = uintptr(atomicloadp(unsafe.Pointer(&zerosize)))
+ cursize = uintptr(atomic.Loadp(unsafe.Pointer(&zerosize)))
if cursize < t.size {
for cursize < t.size {
cursize *= 2
throw("map element too large")
}
}
- atomicstorep1(unsafe.Pointer(&zeroptr), persistentalloc(cursize, 64, &memstats.other_sys))
- atomicstorep1(unsafe.Pointer(&zerosize), unsafe.Pointer(zerosize))
+ atomic.Storep1(unsafe.Pointer(&zeroptr), persistentalloc(cursize, 64, &memstats.other_sys))
+ atomic.Storep1(unsafe.Pointer(&zerosize), unsafe.Pointer(zerosize))
}
unlock(&zerolock)
}
package runtime
import (
+ "runtime/internal/atomic"
"unsafe"
)
racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast32))
}
if h == nil || h.count == 0 {
- return atomicloadp(unsafe.Pointer(&zeroptr))
+ return atomic.Loadp(unsafe.Pointer(&zeroptr))
}
var b *bmap
if h.B == 0 {
}
b = b.overflow(t)
if b == nil {
- return atomicloadp(unsafe.Pointer(&zeroptr))
+ return atomic.Loadp(unsafe.Pointer(&zeroptr))
}
}
}
racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32))
}
if h == nil || h.count == 0 {
- return atomicloadp(unsafe.Pointer(&zeroptr)), false
+ return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
}
var b *bmap
if h.B == 0 {
}
b = b.overflow(t)
if b == nil {
- return atomicloadp(unsafe.Pointer(&zeroptr)), false
+ return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
}
}
}
racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
}
if h == nil || h.count == 0 {
- return atomicloadp(unsafe.Pointer(&zeroptr))
+ return atomic.Loadp(unsafe.Pointer(&zeroptr))
}
var b *bmap
if h.B == 0 {
}
b = b.overflow(t)
if b == nil {
- return atomicloadp(unsafe.Pointer(&zeroptr))
+ return atomic.Loadp(unsafe.Pointer(&zeroptr))
}
}
}
racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
}
if h == nil || h.count == 0 {
- return atomicloadp(unsafe.Pointer(&zeroptr)), false
+ return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
}
var b *bmap
if h.B == 0 {
}
b = b.overflow(t)
if b == nil {
- return atomicloadp(unsafe.Pointer(&zeroptr)), false
+ return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
}
}
}
racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr))
}
if h == nil || h.count == 0 {
- return atomicloadp(unsafe.Pointer(&zeroptr))
+ return atomic.Loadp(unsafe.Pointer(&zeroptr))
}
key := stringStructOf(&ky)
if h.B == 0 {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize))
}
}
- return atomicloadp(unsafe.Pointer(&zeroptr))
+ return atomic.Loadp(unsafe.Pointer(&zeroptr))
}
// long key, try not to do more comparisons than necessary
keymaybe := uintptr(bucketCnt)
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+keymaybe*uintptr(t.valuesize))
}
}
- return atomicloadp(unsafe.Pointer(&zeroptr))
+ return atomic.Loadp(unsafe.Pointer(&zeroptr))
}
dohash:
hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
}
b = b.overflow(t)
if b == nil {
- return atomicloadp(unsafe.Pointer(&zeroptr))
+ return atomic.Loadp(unsafe.Pointer(&zeroptr))
}
}
}
racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
}
if h == nil || h.count == 0 {
- return atomicloadp(unsafe.Pointer(&zeroptr)), false
+ return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
}
key := stringStructOf(&ky)
if h.B == 0 {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true
}
}
- return atomicloadp(unsafe.Pointer(&zeroptr)), false
+ return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
}
// long key, try not to do more comparisons than necessary
keymaybe := uintptr(bucketCnt)
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+keymaybe*uintptr(t.valuesize)), true
}
}
- return atomicloadp(unsafe.Pointer(&zeroptr)), false
+ return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
}
dohash:
hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
}
b = b.overflow(t)
if b == nil {
- return atomicloadp(unsafe.Pointer(&zeroptr)), false
+ return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
}
}
}
package runtime
-import "unsafe"
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
const (
hashSize = 1009
if locked != 0 {
lock(&ifaceLock)
}
- for m = (*itab)(atomicloadp(unsafe.Pointer(&hash[h]))); m != nil; m = m.link {
+ for m = (*itab)(atomic.Loadp(unsafe.Pointer(&hash[h]))); m != nil; m = m.link {
if m.inter == inter && m._type == typ {
if m.bad != 0 {
m = nil
if msanenabled {
msanread(elem, t.size)
}
- tab := (*itab)(atomicloadp(unsafe.Pointer(cache)))
+ tab := (*itab)(atomic.Loadp(unsafe.Pointer(cache)))
if tab == nil {
tab = getitab(inter, t, false)
atomicstorep(unsafe.Pointer(cache), unsafe.Pointer(tab))
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomic
+
+const (
+ _CacheLineSize = 64
+)
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomic
+
+const (
+ _CacheLineSize = 64
+)
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomic
+
+const (
+ thechar = '6'
+ _BigEndian = 0
+ _CacheLineSize = 64
+ _PCQuantum = 1
+ _Int64Align = 8
+ hugePageSize = 1 << 21
+)
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomic
+
+const (
+ _CacheLineSize = 32
+)
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomic
+
+const (
+ _CacheLineSize = 32
+)
--- /dev/null
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomic
+
+const (
+ _CacheLineSize = 64
+)
--- /dev/null
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomic
+
+const (
+ _CacheLineSize = 64
+)
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+TEXT runtime∕internal∕atomic·nop(SB),NOSPLIT,$0-0
+ RET
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// bool Cas(int32 *val, int32 old, int32 new)
+// Atomically:
+// if(*val == old){
+// *val = new;
+// return 1;
+// }else
+// return 0;
+TEXT runtime∕internal∕atomic·Cas(SB), NOSPLIT, $0-13
+ MOVL ptr+0(FP), BX
+ MOVL old+4(FP), AX
+ MOVL new+8(FP), CX
+ LOCK
+ CMPXCHGL CX, 0(BX)
+ SETEQ ret+12(FP)
+ RET
+
+TEXT runtime∕internal∕atomic·Casuintptr(SB), NOSPLIT, $0-13
+ JMP runtime∕internal∕atomic·Cas(SB)
+
+TEXT runtime∕internal∕atomic·Loaduintptr(SB), NOSPLIT, $0-8
+ JMP runtime∕internal∕atomic·Load(SB)
+
+TEXT runtime∕internal∕atomic·Loaduint(SB), NOSPLIT, $0-8
+ JMP runtime∕internal∕atomic·Load(SB)
+
+TEXT runtime∕internal∕atomic·Storeuintptr(SB), NOSPLIT, $0-8
+ JMP runtime∕internal∕atomic·Store(SB)
+
+TEXT runtime∕internal∕atomic·Xadduintptr(SB), NOSPLIT, $0-8
+ JMP runtime∕internal∕atomic·Xadd(SB)
+
+TEXT runtime∕internal∕atomic·Loadint64(SB), NOSPLIT, $0-16
+ JMP runtime∕internal∕atomic·Load64(SB)
+
+TEXT runtime∕internal∕atomic·Xaddint64(SB), NOSPLIT, $0-16
+ JMP runtime∕internal∕atomic·Xadd64(SB)
+
+
+// bool runtime∕internal∕atomic·Cas64(uint64 *val, uint64 old, uint64 new)
+// Atomically:
+// if(*val == old){
+// *val = new;
+// return 1;
+// } else {
+// return 0;
+// }
+TEXT runtime∕internal∕atomic·Cas64(SB), NOSPLIT, $0-21
+ MOVL ptr+0(FP), BP
+ MOVL old_lo+4(FP), AX
+ MOVL old_hi+8(FP), DX
+ MOVL new_lo+12(FP), BX
+ MOVL new_hi+16(FP), CX
+ LOCK
+ CMPXCHG8B 0(BP)
+ SETEQ ret+20(FP)
+ RET
+
+// bool Casp(void **p, void *old, void *new)
+// Atomically:
+// if(*p == old){
+// *p = new;
+// return 1;
+// }else
+// return 0;
+TEXT runtime∕internal∕atomic·Casp1(SB), NOSPLIT, $0-13
+ MOVL ptr+0(FP), BX
+ MOVL old+4(FP), AX
+ MOVL new+8(FP), CX
+ LOCK
+ CMPXCHGL CX, 0(BX)
+ SETEQ ret+12(FP)
+ RET
+
+// uint32 Xadd(uint32 volatile *val, int32 delta)
+// Atomically:
+// *val += delta;
+// return *val;
+TEXT runtime∕internal∕atomic·Xadd(SB), NOSPLIT, $0-12
+ MOVL ptr+0(FP), BX
+ MOVL delta+4(FP), AX
+ MOVL AX, CX
+ LOCK
+ XADDL AX, 0(BX)
+ ADDL CX, AX
+ MOVL AX, ret+8(FP)
+ RET
+
+TEXT runtime∕internal∕atomic·Xchg(SB), NOSPLIT, $0-12
+ MOVL ptr+0(FP), BX
+ MOVL new+4(FP), AX
+ XCHGL AX, 0(BX)
+ MOVL AX, ret+8(FP)
+ RET
+
+TEXT runtime∕internal∕atomic·Xchguintptr(SB), NOSPLIT, $0-12
+ JMP runtime∕internal∕atomic·Xchg(SB)
+
+
+TEXT runtime∕internal∕atomic·Storep1(SB), NOSPLIT, $0-8
+ MOVL ptr+0(FP), BX
+ MOVL val+4(FP), AX
+ XCHGL AX, 0(BX)
+ RET
+
+TEXT runtime∕internal∕atomic·Store(SB), NOSPLIT, $0-8
+ MOVL ptr+0(FP), BX
+ MOVL val+4(FP), AX
+ XCHGL AX, 0(BX)
+ RET
+
+// uint64 runtime∕internal∕atomic·Load64(uint64 volatile* addr);
+TEXT runtime∕internal∕atomic·Load64(SB), NOSPLIT, $0-12
+ MOVL ptr+0(FP), AX
+ TESTL $7, AX
+ JZ 2(PC)
+ MOVL 0, AX // crash with nil ptr deref
+ LEAL ret_lo+4(FP), BX
+ // MOVQ (%EAX), %MM0
+ BYTE $0x0f; BYTE $0x6f; BYTE $0x00
+ // MOVQ %MM0, 0(%EBX)
+ BYTE $0x0f; BYTE $0x7f; BYTE $0x03
+ // EMMS
+ BYTE $0x0F; BYTE $0x77
+ RET
+
+// void runtime∕internal∕atomic·Store64(uint64 volatile* addr, uint64 v);
+TEXT runtime∕internal∕atomic·Store64(SB), NOSPLIT, $0-12
+ MOVL ptr+0(FP), AX
+ TESTL $7, AX
+ JZ 2(PC)
+ MOVL 0, AX // crash with nil ptr deref
+ // MOVQ and EMMS were introduced on the Pentium MMX.
+ // MOVQ 0x8(%ESP), %MM0
+ BYTE $0x0f; BYTE $0x6f; BYTE $0x44; BYTE $0x24; BYTE $0x08
+ // MOVQ %MM0, (%EAX)
+ BYTE $0x0f; BYTE $0x7f; BYTE $0x00
+ // EMMS
+ BYTE $0x0F; BYTE $0x77
+ // This is essentially a no-op, but it provides required memory fencing.
+ // It can be replaced with MFENCE, but MFENCE was introduced only on the Pentium4 (SSE2).
+ MOVL $0, AX
+ LOCK
+ XADDL AX, (SP)
+ RET
+
+// void runtime∕internal∕atomic·Or8(byte volatile*, byte);
+TEXT runtime∕internal∕atomic·Or8(SB), NOSPLIT, $0-5
+ MOVL ptr+0(FP), AX
+ MOVB val+4(FP), BX
+ LOCK
+ ORB BX, (AX)
+ RET
+
+// void runtime∕internal∕atomic·And8(byte volatile*, byte);
+TEXT runtime∕internal∕atomic·And8(SB), NOSPLIT, $0-5
+ MOVL ptr+0(FP), AX
+ MOVB val+4(FP), BX
+ LOCK
+ ANDB BX, (AX)
+ RET
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// bool Cas(int32 *val, int32 old, int32 new)
+// Atomically:
+// if(*val == old){
+// *val = new;
+// return 1;
+// } else
+// return 0;
+TEXT runtime∕internal∕atomic·Cas(SB),NOSPLIT,$0-17
+ MOVQ ptr+0(FP), BX
+ MOVL old+8(FP), AX
+ MOVL new+12(FP), CX
+ LOCK
+ CMPXCHGL CX, 0(BX)
+ SETEQ ret+16(FP)
+ RET
+
+// bool runtime∕internal∕atomic·Cas64(uint64 *val, uint64 old, uint64 new)
+// Atomically:
+// if(*val == old){
+// *val = new;
+// return 1;
+// } else {
+// return 0;
+// }
+TEXT runtime∕internal∕atomic·Cas64(SB), NOSPLIT, $0-25
+ MOVQ ptr+0(FP), BX
+ MOVQ old+8(FP), AX
+ MOVQ new+16(FP), CX
+ LOCK
+ CMPXCHGQ CX, 0(BX)
+ SETEQ ret+24(FP)
+ RET
+
+TEXT runtime∕internal∕atomic·Casuintptr(SB), NOSPLIT, $0-25
+ JMP runtime∕internal∕atomic·Cas64(SB)
+
+TEXT runtime∕internal∕atomic·Loaduintptr(SB), NOSPLIT, $0-16
+ JMP runtime∕internal∕atomic·Load64(SB)
+
+TEXT runtime∕internal∕atomic·Loaduint(SB), NOSPLIT, $0-16
+ JMP runtime∕internal∕atomic·Load64(SB)
+
+TEXT runtime∕internal∕atomic·Storeuintptr(SB), NOSPLIT, $0-16
+ JMP runtime∕internal∕atomic·Store64(SB)
+
+TEXT runtime∕internal∕atomic·Loadint64(SB), NOSPLIT, $0-16
+ JMP runtime∕internal∕atomic·Load64(SB)
+
+TEXT runtime∕internal∕atomic·Xaddint64(SB), NOSPLIT, $0-16
+ JMP runtime∕internal∕atomic·Xadd64(SB)
+
+// bool Casp(void **val, void *old, void *new)
+// Atomically:
+// if(*val == old){
+// *val = new;
+// return 1;
+// } else
+// return 0;
+TEXT runtime∕internal∕atomic·Casp1(SB), NOSPLIT, $0-25
+ MOVQ ptr+0(FP), BX
+ MOVQ old+8(FP), AX
+ MOVQ new+16(FP), CX
+ LOCK
+ CMPXCHGQ CX, 0(BX)
+ SETEQ ret+24(FP)
+ RET
+
+// uint32 Xadd(uint32 volatile *val, int32 delta)
+// Atomically:
+// *val += delta;
+// return *val;
+TEXT runtime∕internal∕atomic·Xadd(SB), NOSPLIT, $0-20
+ MOVQ ptr+0(FP), BX
+ MOVL delta+8(FP), AX
+ MOVL AX, CX
+ LOCK
+ XADDL AX, 0(BX)
+ ADDL CX, AX
+ MOVL AX, ret+16(FP)
+ RET
+
+TEXT runtime∕internal∕atomic·Xadd64(SB), NOSPLIT, $0-24
+ MOVQ ptr+0(FP), BX
+ MOVQ delta+8(FP), AX
+ MOVQ AX, CX
+ LOCK
+ XADDQ AX, 0(BX)
+ ADDQ CX, AX
+ MOVQ AX, ret+16(FP)
+ RET
+
+TEXT runtime∕internal∕atomic·Xadduintptr(SB), NOSPLIT, $0-24
+ JMP runtime∕internal∕atomic·Xadd64(SB)
+
+TEXT runtime∕internal∕atomic·Xchg(SB), NOSPLIT, $0-20
+ MOVQ ptr+0(FP), BX
+ MOVL new+8(FP), AX
+ XCHGL AX, 0(BX)
+ MOVL AX, ret+16(FP)
+ RET
+
+TEXT runtime∕internal∕atomic·Xchg64(SB), NOSPLIT, $0-24
+ MOVQ ptr+0(FP), BX
+ MOVQ new+8(FP), AX
+ XCHGQ AX, 0(BX)
+ MOVQ AX, ret+16(FP)
+ RET
+
+TEXT runtime∕internal∕atomic·Xchguintptr(SB), NOSPLIT, $0-24
+ JMP runtime∕internal∕atomic·Xchg64(SB)
+
+TEXT runtime∕internal∕atomic·Storep1(SB), NOSPLIT, $0-16
+ MOVQ ptr+0(FP), BX
+ MOVQ val+8(FP), AX
+ XCHGQ AX, 0(BX)
+ RET
+
+TEXT runtime∕internal∕atomic·Store(SB), NOSPLIT, $0-12
+ MOVQ ptr+0(FP), BX
+ MOVL val+8(FP), AX
+ XCHGL AX, 0(BX)
+ RET
+
+TEXT runtime∕internal∕atomic·Store64(SB), NOSPLIT, $0-16
+ MOVQ ptr+0(FP), BX
+ MOVQ val+8(FP), AX
+ XCHGQ AX, 0(BX)
+ RET
+
+// void runtime∕internal∕atomic·Or8(byte volatile*, byte);
+TEXT runtime∕internal∕atomic·Or8(SB), NOSPLIT, $0-9
+ MOVQ ptr+0(FP), AX
+ MOVB val+8(FP), BX
+ LOCK
+ ORB BX, (AX)
+ RET
+
+// void runtime∕internal∕atomic·And8(byte volatile*, byte);
+TEXT runtime∕internal∕atomic·And8(SB), NOSPLIT, $0-9
+ MOVQ ptr+0(FP), AX
+ MOVB val+8(FP), BX
+ LOCK
+ ANDB BX, (AX)
+ RET
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// bool Cas(int32 *val, int32 old, int32 new)
+// Atomically:
+// if(*val == old){
+// *val = new;
+// return 1;
+// } else
+// return 0;
+TEXT runtime∕internal∕atomic·Cas(SB), NOSPLIT, $0-17
+ MOVL ptr+0(FP), BX
+ MOVL old+4(FP), AX
+ MOVL new+8(FP), CX
+ LOCK
+ CMPXCHGL CX, 0(BX)
+ SETEQ ret+16(FP)
+ RET
+
+TEXT runtime∕internal∕atomic·Casuintptr(SB), NOSPLIT, $0-17
+ JMP runtime∕internal∕atomic·Cas(SB)
+
+TEXT runtime∕internal∕atomic·Loaduintptr(SB), NOSPLIT, $0-12
+ JMP runtime∕internal∕atomic·Load(SB)
+
+TEXT runtime∕internal∕atomic·Loaduint(SB), NOSPLIT, $0-12
+ JMP runtime∕internal∕atomic·Load(SB)
+
+TEXT runtime∕internal∕atomic·Storeuintptr(SB), NOSPLIT, $0-12
+ JMP runtime∕internal∕atomic·Store(SB)
+
+TEXT runtime∕internal∕atomic·Loadint64(SB), NOSPLIT, $0-24
+ JMP runtime∕internal∕atomic·Load64(SB)
+
+TEXT runtime∕internal∕atomic·Xaddint64(SB), NOSPLIT, $0-24
+ JMP runtime∕internal∕atomic·Xadd64(SB)
+
+// bool runtime∕internal∕atomic·Cas64(uint64 *val, uint64 old, uint64 new)
+// Atomically:
+// if(*val == old){
+// *val = new;
+// return 1;
+// } else {
+// return 0;
+// }
+TEXT runtime∕internal∕atomic·Cas64(SB), NOSPLIT, $0-25
+ MOVL ptr+0(FP), BX
+ MOVQ old+8(FP), AX
+ MOVQ new+16(FP), CX
+ LOCK
+ CMPXCHGQ CX, 0(BX)
+ SETEQ ret+24(FP)
+ RET
+
+// bool Casp(void **val, void *old, void *new)
+// Atomically:
+// if(*val == old){
+// *val = new;
+// return 1;
+// } else
+// return 0;
+TEXT runtime∕internal∕atomic·Casp1(SB), NOSPLIT, $0-17
+ MOVL ptr+0(FP), BX
+ MOVL old+4(FP), AX
+ MOVL new+8(FP), CX
+ LOCK
+ CMPXCHGL CX, 0(BX)
+ SETEQ ret+16(FP)
+ RET
+
+// uint32 Xadd(uint32 volatile *val, int32 delta)
+// Atomically:
+// *val += delta;
+// return *val;
+TEXT runtime∕internal∕atomic·Xadd(SB), NOSPLIT, $0-12
+ MOVL ptr+0(FP), BX
+ MOVL delta+4(FP), AX
+ MOVL AX, CX
+ LOCK
+ XADDL AX, 0(BX)
+ ADDL CX, AX
+ MOVL AX, ret+8(FP)
+ RET
+
+TEXT runtime∕internal∕atomic·Xadd64(SB), NOSPLIT, $0-24
+ MOVL ptr+0(FP), BX
+ MOVQ delta+8(FP), AX
+ MOVQ AX, CX
+ LOCK
+ XADDQ AX, 0(BX)
+ ADDQ CX, AX
+ MOVQ AX, ret+16(FP)
+ RET
+
+TEXT runtime∕internal∕atomic·Xadduintptr(SB), NOSPLIT, $0-12
+ JMP runtime∕internal∕atomic·Xadd(SB)
+
+TEXT runtime∕internal∕atomic·Xchg(SB), NOSPLIT, $0-12
+ MOVL ptr+0(FP), BX
+ MOVL new+4(FP), AX
+ XCHGL AX, 0(BX)
+ MOVL AX, ret+8(FP)
+ RET
+
+TEXT runtime∕internal∕atomic·Xchg64(SB), NOSPLIT, $0-24
+ MOVL ptr+0(FP), BX
+ MOVQ new+8(FP), AX
+ XCHGQ AX, 0(BX)
+ MOVQ AX, ret+16(FP)
+ RET
+
+TEXT runtime∕internal∕atomic·Xchguintptr(SB), NOSPLIT, $0-12
+ JMP runtime∕internal∕atomic·Xchg(SB)
+
+TEXT runtime∕internal∕atomic·Storep1(SB), NOSPLIT, $0-8
+ MOVL ptr+0(FP), BX
+ MOVL val+4(FP), AX
+ XCHGL AX, 0(BX)
+ RET
+
+TEXT runtime∕internal∕atomic·Store(SB), NOSPLIT, $0-8
+ MOVL ptr+0(FP), BX
+ MOVL val+4(FP), AX
+ XCHGL AX, 0(BX)
+ RET
+
+TEXT runtime∕internal∕atomic·Store64(SB), NOSPLIT, $0-16
+ MOVL ptr+0(FP), BX
+ MOVQ val+8(FP), AX
+ XCHGQ AX, 0(BX)
+ RET
+
+// void runtime∕internal∕atomic·Or8(byte volatile*, byte);
+TEXT runtime∕internal∕atomic·Or8(SB), NOSPLIT, $0-5
+ MOVL ptr+0(FP), BX
+ MOVB val+4(FP), AX
+ LOCK
+ ORB AX, 0(BX)
+ RET
+
+// void runtime∕internal∕atomic·And8(byte volatile*, byte);
+TEXT runtime∕internal∕atomic·And8(SB), NOSPLIT, $0-5
+ MOVL ptr+0(FP), BX
+ MOVB val+4(FP), AX
+ LOCK
+ ANDB AX, 0(BX)
+ RET
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// bool armcas(int32 *val, int32 old, int32 new)
+// Atomically:
+// if(*val == old){
+// *val = new;
+// return 1;
+// }else
+// return 0;
+//
+// To implement runtime∕internal∕atomic·Cas in sys_$GOOS_arm.s
+// using the native instructions, use:
+//
+// TEXT runtime∕internal∕atomic·Cas(SB),NOSPLIT,$0
+// B runtime∕internal∕atomic·armcas(SB)
+//
+TEXT runtime∕internal∕atomic·armcas(SB),NOSPLIT,$0-13
+ MOVW valptr+0(FP), R1
+ MOVW old+4(FP), R2
+ MOVW new+8(FP), R3
+casl:
+ LDREX (R1), R0
+ CMP R0, R2
+ BNE casfail
+
+ MOVB runtime·goarm(SB), R11
+ CMP $7, R11
+ BLT 2(PC)
+ WORD $0xf57ff05a // dmb ishst
+
+ STREX R3, (R1), R0
+ CMP $0, R0
+ BNE casl
+ MOVW $1, R0
+
+ MOVB runtime·goarm(SB), R11
+ CMP $7, R11
+ BLT 2(PC)
+ WORD $0xf57ff05b // dmb ish
+
+ MOVB R0, ret+12(FP)
+ RET
+casfail:
+ MOVW $0, R0
+ MOVB R0, ret+12(FP)
+ RET
+
+TEXT runtime∕internal∕atomic·Casuintptr(SB),NOSPLIT,$0-13
+ B runtime∕internal∕atomic·Cas(SB)
+
+TEXT runtime∕internal∕atomic·Loaduintptr(SB),NOSPLIT,$0-8
+ B runtime∕internal∕atomic·Load(SB)
+
+TEXT runtime∕internal∕atomic·Loaduint(SB),NOSPLIT,$0-8
+ B runtime∕internal∕atomic·Load(SB)
+
+TEXT runtime∕internal∕atomic·Storeuintptr(SB),NOSPLIT,$0-8
+ B runtime∕internal∕atomic·Store(SB)
+
+TEXT runtime∕internal∕atomic·Xadduintptr(SB),NOSPLIT,$0-8
+ B runtime∕internal∕atomic·Xadd(SB)
+
+TEXT runtime∕internal∕atomic·Loadint64(SB),NOSPLIT,$0-16
+ B runtime∕internal∕atomic·Load64(SB)
+
+TEXT runtime∕internal∕atomic·Xaddint64(SB),NOSPLIT,$0-16
+ B runtime∕internal∕atomic·Xadd64(SB)
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// bool Cas(uint32 *ptr, uint32 old, uint32 new)
+// Atomically:
+// if(*val == old){
+// *val = new;
+// return 1;
+// } else
+// return 0;
+TEXT runtime∕internal∕atomic·Cas(SB), NOSPLIT, $0-17
+ MOVD ptr+0(FP), R0
+ MOVW old+8(FP), R1
+ MOVW new+12(FP), R2
+again:
+ LDAXRW (R0), R3
+ CMPW R1, R3
+ BNE ok
+ STLXRW R2, (R0), R3
+ CBNZ R3, again
+ok:
+ CSET EQ, R0
+ MOVB R0, ret+16(FP)
+ RET
+
+TEXT runtime∕internal∕atomic·Casuintptr(SB), NOSPLIT, $0-25
+ B runtime∕internal∕atomic·Cas64(SB)
+
+TEXT runtime∕internal∕atomic·Loaduintptr(SB), NOSPLIT, $-8-16
+ B runtime∕internal∕atomic·Load64(SB)
+
+TEXT runtime∕internal∕atomic·Loaduint(SB), NOSPLIT, $-8-16
+ B runtime∕internal∕atomic·Load64(SB)
+
+TEXT runtime∕internal∕atomic·Storeuintptr(SB), NOSPLIT, $0-16
+ B runtime∕internal∕atomic·Store64(SB)
+
+TEXT runtime∕internal∕atomic·Xadduintptr(SB), NOSPLIT, $0-16
+ B runtime∕internal∕atomic·Xadd64(SB)
+
+TEXT runtime∕internal∕atomic·Loadint64(SB), NOSPLIT, $0-16
+ B runtime∕internal∕atomic·Load64(SB)
+
+TEXT runtime∕internal∕atomic·Xaddint64(SB), NOSPLIT, $0-16
+ B runtime∕internal∕atomic·Xadd64(SB)
+
+// bool Casp(void **val, void *old, void *new)
+// Atomically:
+// if(*val == old){
+// *val = new;
+// return 1;
+// } else
+// return 0;
+TEXT runtime∕internal∕atomic·Casp1(SB), NOSPLIT, $0-25
+ B runtime∕internal∕atomic·Cas64(SB)
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ppc64 ppc64le
+
+#include "textflag.h"
+
+// bool Cas(uint32 *ptr, uint32 old, uint32 new)
+// Atomically:
+// if(*val == old){
+// *val = new;
+// return 1;
+// } else
+// return 0;
+TEXT runtime∕internal∕atomic·Cas(SB), NOSPLIT, $0-17
+ MOVD ptr+0(FP), R3
+ MOVWZ old+8(FP), R4
+ MOVWZ new+12(FP), R5
+cas_again:
+ SYNC
+ LWAR (R3), R6
+ CMPW R6, R4
+ BNE cas_fail
+ STWCCC R5, (R3)
+ BNE cas_again
+ MOVD $1, R3
+ SYNC
+ ISYNC
+ MOVB R3, ret+16(FP)
+ RET
+cas_fail:
+ MOVD $0, R3
+ BR -5(PC)
+
+// bool runtime∕internal∕atomic·Cas64(uint64 *ptr, uint64 old, uint64 new)
+// Atomically:
+// if(*val == old){
+// *val = new;
+// return 1;
+// } else {
+// return 0;
+// }
+TEXT runtime∕internal∕atomic·Cas64(SB), NOSPLIT, $0-25
+ MOVD ptr+0(FP), R3
+ MOVD old+8(FP), R4
+ MOVD new+16(FP), R5
+cas64_again:
+ SYNC
+ LDAR (R3), R6
+ CMP R6, R4
+ BNE cas64_fail
+ STDCCC R5, (R3)
+ BNE cas64_again
+ MOVD $1, R3
+ SYNC
+ ISYNC
+ MOVB R3, ret+24(FP)
+ RET
+cas64_fail:
+ MOVD $0, R3
+ BR -5(PC)
+
+TEXT runtime∕internal∕atomic·Casuintptr(SB), NOSPLIT, $0-25
+ BR runtime∕internal∕atomic·Cas64(SB)
+
+TEXT runtime∕internal∕atomic·Loaduintptr(SB), NOSPLIT|NOFRAME, $0-16
+ BR runtime∕internal∕atomic·Load64(SB)
+
+TEXT runtime∕internal∕atomic·Loaduint(SB), NOSPLIT|NOFRAME, $0-16
+ BR runtime∕internal∕atomic·Load64(SB)
+
+TEXT runtime∕internal∕atomic·Storeuintptr(SB), NOSPLIT, $0-16
+ BR runtime∕internal∕atomic·Store64(SB)
+
+TEXT runtime∕internal∕atomic·Xadduintptr(SB), NOSPLIT, $0-24
+ BR runtime∕internal∕atomic·Xadd64(SB)
+
+TEXT runtime∕internal∕atomic·Loadint64(SB), NOSPLIT, $0-16
+ BR runtime∕internal∕atomic·Load64(SB)
+
+TEXT runtime∕internal∕atomic·Xaddint64(SB), NOSPLIT, $0-16
+ BR runtime∕internal∕atomic·Xadd64(SB)
+
+// bool Casp(void **val, void *old, void *new)
+// Atomically:
+// if(*val == old){
+// *val = new;
+// return 1;
+// } else
+// return 0;
+TEXT runtime∕internal∕atomic·Casp1(SB), NOSPLIT, $0-25
+ BR runtime∕internal∕atomic·Cas64(SB)
+
+// uint32 Xadd(uint32 volatile *ptr, int32 delta)
+// Atomically:
+// *val += delta;
+// return *val;
+TEXT runtime∕internal∕atomic·Xadd(SB), NOSPLIT, $0-20
+ MOVD ptr+0(FP), R4
+ MOVW delta+8(FP), R5
+ SYNC
+ LWAR (R4), R3
+ ADD R5, R3
+ STWCCC R3, (R4)
+ BNE -4(PC)
+ SYNC
+ ISYNC
+ MOVW R3, ret+16(FP)
+ RET
+
+TEXT runtime∕internal∕atomic·Xadd64(SB), NOSPLIT, $0-24
+ MOVD ptr+0(FP), R4
+ MOVD delta+8(FP), R5
+ SYNC
+ LDAR (R4), R3
+ ADD R5, R3
+ STDCCC R3, (R4)
+ BNE -4(PC)
+ SYNC
+ ISYNC
+ MOVD R3, ret+16(FP)
+ RET
+
+TEXT runtime∕internal∕atomic·Xchg(SB), NOSPLIT, $0-20
+ MOVD ptr+0(FP), R4
+ MOVW new+8(FP), R5
+ SYNC
+ LWAR (R4), R3
+ STWCCC R5, (R4)
+ BNE -3(PC)
+ SYNC
+ ISYNC
+ MOVW R3, ret+16(FP)
+ RET
+
+TEXT runtime∕internal∕atomic·Xchg64(SB), NOSPLIT, $0-24
+ MOVD ptr+0(FP), R4
+ MOVD new+8(FP), R5
+ SYNC
+ LDAR (R4), R3
+ STDCCC R5, (R4)
+ BNE -3(PC)
+ SYNC
+ ISYNC
+ MOVD R3, ret+16(FP)
+ RET
+
+TEXT runtime∕internal∕atomic·Xchguintptr(SB), NOSPLIT, $0-24
+ BR runtime∕internal∕atomic·Xchg64(SB)
+
+
+TEXT runtime∕internal∕atomic·Storep1(SB), NOSPLIT, $0-16
+ BR runtime∕internal∕atomic·Store64(SB)
+
+TEXT runtime∕internal∕atomic·Store(SB), NOSPLIT, $0-12
+ MOVD ptr+0(FP), R3
+ MOVW val+8(FP), R4
+ SYNC
+ MOVW R4, 0(R3)
+ RET
+
+TEXT runtime∕internal∕atomic·Store64(SB), NOSPLIT, $0-16
+ MOVD ptr+0(FP), R3
+ MOVD val+8(FP), R4
+ SYNC
+ MOVD R4, 0(R3)
+ RET
+
+// void runtime∕internal∕atomic·Or8(byte volatile*, byte);
+TEXT runtime∕internal∕atomic·Or8(SB), NOSPLIT, $0-9
+ MOVD ptr+0(FP), R3
+ MOVBZ val+8(FP), R4
+ // Align ptr down to 4 bytes so we can use 32-bit load/store.
+ // R5 = (R3 << 0) & ~3
+ RLDCR $0, R3, $~3, R5
+ // Compute val shift.
+#ifdef GOARCH_ppc64
+ // Big endian. ptr = ptr ^ 3
+ XOR $3, R3
+#endif
+ // R6 = ((ptr & 3) * 8) = (ptr << 3) & (3*8)
+ RLDC $3, R3, $(3*8), R6
+ // Shift val for aligned ptr. R4 = val << R6
+ SLD R6, R4, R4
+
+again:
+ SYNC
+ LWAR (R5), R6
+ OR R4, R6
+ STWCCC R6, (R5)
+ BNE again
+ SYNC
+ ISYNC
+ RET
+
+// void runtime∕internal∕atomic·And8(byte volatile*, byte);
+TEXT runtime∕internal∕atomic·And8(SB), NOSPLIT, $0-9
+ MOVD ptr+0(FP), R3
+ MOVBZ val+8(FP), R4
+ // Align ptr down to 4 bytes so we can use 32-bit load/store.
+ // R5 = (R3 << 0) & ~3
+ RLDCR $0, R3, $~3, R5
+ // Compute val shift.
+#ifdef GOARCH_ppc64
+ // Big endian. ptr = ptr ^ 3
+ XOR $3, R3
+#endif
+ // R6 = ((ptr & 3) * 8) = (ptr << 3) & (3*8)
+ RLDC $3, R3, $(3*8), R6
+ // Shift val for aligned ptr. R4 = val << R6 | ^(0xFF << R6)
+ MOVD $0xFF, R7
+ SLD R6, R4
+ SLD R6, R7
+ XOR $-1, R7
+ OR R7, R4
+again:
+ SYNC
+ LWAR (R5), R6
+ AND R4, R6
+ STWCCC R6, (R5)
+ BNE again
+ SYNC
+ ISYNC
+ RET
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package runtime
+// +build 386
+
+package atomic
import "unsafe"
// code by optimizers will preserve the relative order of memory accesses.
//go:nosplit
-func atomicload(ptr *uint32) uint32 {
+func Load(ptr *uint32) uint32 {
nop()
return *ptr
}
//go:nosplit
-func atomicloadp(ptr unsafe.Pointer) unsafe.Pointer {
+func Loadp(ptr unsafe.Pointer) unsafe.Pointer {
nop()
return *(*unsafe.Pointer)(ptr)
}
//go:nosplit
-func xadd64(ptr *uint64, delta int64) uint64 {
+func Xadd64(ptr *uint64, delta int64) uint64 {
for {
old := *ptr
- if cas64(ptr, old, old+uint64(delta)) {
+ if Cas64(ptr, old, old+uint64(delta)) {
return old + uint64(delta)
}
}
}
//go:noescape
-//go:linkname xadduintptr runtime.xadd
-func xadduintptr(ptr *uintptr, delta uintptr) uintptr
+func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
//go:nosplit
-func xchg64(ptr *uint64, new uint64) uint64 {
+func Xchg64(ptr *uint64, new uint64) uint64 {
for {
old := *ptr
- if cas64(ptr, old, new) {
+ if Cas64(ptr, old, new) {
return old
}
}
}
//go:noescape
-func xadd(ptr *uint32, delta int32) uint32
+func Xadd(ptr *uint32, delta int32) uint32
//go:noescape
-func xchg(ptr *uint32, new uint32) uint32
+func Xchg(ptr *uint32, new uint32) uint32
//go:noescape
-func xchguintptr(ptr *uintptr, new uintptr) uintptr
+func Xchguintptr(ptr *uintptr, new uintptr) uintptr
//go:noescape
-func atomicload64(ptr *uint64) uint64
+func Load64(ptr *uint64) uint64
//go:noescape
-func atomicand8(ptr *uint8, val uint8)
+func And8(ptr *uint8, val uint8)
//go:noescape
-func atomicor8(ptr *uint8, val uint8)
+func Or8(ptr *uint8, val uint8)
// NOTE: Do not add atomicxor8 (XOR is not idempotent).
//go:noescape
-func cas64(ptr *uint64, old, new uint64) bool
+func Cas64(ptr *uint64, old, new uint64) bool
//go:noescape
-func atomicstore(ptr *uint32, val uint32)
+func Store(ptr *uint32, val uint32)
//go:noescape
-func atomicstore64(ptr *uint64, val uint64)
+func Store64(ptr *uint64, val uint64)
// NO go:noescape annotation; see atomic_pointer.go.
-func atomicstorep1(ptr unsafe.Pointer, val unsafe.Pointer)
+func Storep1(ptr unsafe.Pointer, val unsafe.Pointer)
// +build amd64 amd64p32
-package runtime
+package atomic
import "unsafe"
// code by optimizers will preserve the relative order of memory accesses.
//go:nosplit
-func atomicload(ptr *uint32) uint32 {
+func Load(ptr *uint32) uint32 {
nop()
return *ptr
}
//go:nosplit
-func atomicloadp(ptr unsafe.Pointer) unsafe.Pointer {
+func Loadp(ptr unsafe.Pointer) unsafe.Pointer {
nop()
return *(*unsafe.Pointer)(ptr)
}
//go:nosplit
-func atomicload64(ptr *uint64) uint64 {
+func Load64(ptr *uint64) uint64 {
nop()
return *ptr
}
//go:noescape
-func xadd(ptr *uint32, delta int32) uint32
+func Xadd(ptr *uint32, delta int32) uint32
//go:noescape
-func xadd64(ptr *uint64, delta int64) uint64
+func Xadd64(ptr *uint64, delta int64) uint64
//go:noescape
-func xadduintptr(ptr *uintptr, delta uintptr) uintptr
+func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
//go:noescape
-func xchg(ptr *uint32, new uint32) uint32
+func Xchg(ptr *uint32, new uint32) uint32
//go:noescape
-func xchg64(ptr *uint64, new uint64) uint64
+func Xchg64(ptr *uint64, new uint64) uint64
//go:noescape
-func xchguintptr(ptr *uintptr, new uintptr) uintptr
+func Xchguintptr(ptr *uintptr, new uintptr) uintptr
//go:noescape
-func atomicand8(ptr *uint8, val uint8)
+func And8(ptr *uint8, val uint8)
//go:noescape
-func atomicor8(ptr *uint8, val uint8)
+func Or8(ptr *uint8, val uint8)
// NOTE: Do not add atomicxor8 (XOR is not idempotent).
//go:noescape
-func cas64(ptr *uint64, old, new uint64) bool
+func Cas64(ptr *uint64, old, new uint64) bool
//go:noescape
-func atomicstore(ptr *uint32, val uint32)
+func Store(ptr *uint32, val uint32)
//go:noescape
-func atomicstore64(ptr *uint64, val uint64)
+func Store64(ptr *uint64, val uint64)
// NO go:noescape annotation; see atomic_pointer.go.
-func atomicstorep1(ptr unsafe.Pointer, val unsafe.Pointer)
+func Storep1(ptr unsafe.Pointer, val unsafe.Pointer)
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build arm
+
+package atomic
+
+import "unsafe"
+
+type spinlock struct {
+ v uint32
+}
+
+//go:nosplit
+func (l *spinlock) lock() {
+ for {
+ if Cas(&l.v, 0, 1) {
+ return
+ }
+ }
+}
+
+//go:nosplit
+func (l *spinlock) unlock() {
+ Store(&l.v, 0)
+}
+
+var locktab [57]struct {
+ l spinlock
+ pad [_CacheLineSize - unsafe.Sizeof(spinlock{})]byte
+}
+
+func addrLock(addr *uint64) *spinlock {
+ return &locktab[(uintptr(unsafe.Pointer(addr))>>3)%uintptr(len(locktab))].l
+}
+
+// Atomic add and return new value.
+//go:nosplit
+func Xadd(val *uint32, delta int32) uint32 {
+ for {
+ oval := *val
+ nval := oval + uint32(delta)
+ if Cas(val, oval, nval) {
+ return nval
+ }
+ }
+}
+
+//go:noescape
+func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
+
+//go:nosplit
+func Xchg(addr *uint32, v uint32) uint32 {
+ for {
+ old := *addr
+ if Cas(addr, old, v) {
+ return old
+ }
+ }
+}
+
+//go:nosplit
+func Xchguintptr(addr *uintptr, v uintptr) uintptr {
+ return uintptr(Xchg((*uint32)(unsafe.Pointer(addr)), uint32(v)))
+}
+
+//go:nosplit
+func Load(addr *uint32) uint32 {
+ return Xadd(addr, 0)
+}
+
+// Should be a built-in for unsafe.Pointer?
+//go:nosplit
+func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
+ return unsafe.Pointer(uintptr(p) + x)
+}
+
+//go:nosplit
+func Loadp(addr unsafe.Pointer) unsafe.Pointer {
+ return unsafe.Pointer(uintptr(Xadd((*uint32)(addr), 0)))
+}
+
+//go:nosplit
+func Storep1(addr unsafe.Pointer, v unsafe.Pointer) {
+ for {
+ old := *(*unsafe.Pointer)(addr)
+ if Casp1((*unsafe.Pointer)(addr), old, v) {
+ return
+ }
+ }
+}
+
+//go:nosplit
+func Store(addr *uint32, v uint32) {
+ for {
+ old := *addr
+ if Cas(addr, old, v) {
+ return
+ }
+ }
+}
+
+//go:nosplit
+func Cas64(addr *uint64, old, new uint64) bool {
+ var ok bool
+ addrLock(addr).lock()
+ if *addr == old {
+ *addr = new
+ ok = true
+ }
+ addrLock(addr).unlock()
+ return ok
+}
+
+//go:nosplit
+func Xadd64(addr *uint64, delta int64) uint64 {
+ var r uint64
+ addrLock(addr).lock()
+ r = *addr + uint64(delta)
+ *addr = r
+ addrLock(addr).unlock()
+ return r
+}
+
+//go:nosplit
+func Xchg64(addr *uint64, v uint64) uint64 {
+ var r uint64
+ addrLock(addr).lock()
+ r = *addr
+ *addr = v
+ addrLock(addr).unlock()
+ return r
+}
+
+//go:nosplit
+func Load64(addr *uint64) uint64 {
+ var r uint64
+ addrLock(addr).lock()
+ r = *addr
+ addrLock(addr).unlock()
+ return r
+}
+
+//go:nosplit
+func Store64(addr *uint64, v uint64) {
+ addrLock(addr).lock()
+ *addr = v
+ addrLock(addr).unlock()
+}
+
+//go:nosplit
+func Or8(addr *uint8, v uint8) {
+ // Align down to 4 bytes and use 32-bit CAS.
+ uaddr := uintptr(unsafe.Pointer(addr))
+ addr32 := (*uint32)(unsafe.Pointer(uaddr &^ 3))
+ word := uint32(v) << ((uaddr & 3) * 8) // little endian
+ for {
+ old := *addr32
+ if Cas(addr32, old, old|word) {
+ return
+ }
+ }
+}
+
+//go:nosplit
+func And8(addr *uint8, v uint8) {
+ // Align down to 4 bytes and use 32-bit CAS.
+ uaddr := uintptr(unsafe.Pointer(addr))
+ addr32 := (*uint32)(unsafe.Pointer(uaddr &^ 3))
+ word := uint32(v) << ((uaddr & 3) * 8) // little endian
+ mask := uint32(0xFF) << ((uaddr & 3) * 8) // little endian
+ word |= ^mask
+ for {
+ old := *addr32
+ if Cas(addr32, old, old&word) {
+ return
+ }
+ }
+}
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package runtime
+// +build arm64
+
+package atomic
import "unsafe"
//go:noescape
-func xadd(ptr *uint32, delta int32) uint32
+func Xadd(ptr *uint32, delta int32) uint32
//go:noescape
-func xadd64(ptr *uint64, delta int64) uint64
+func Xadd64(ptr *uint64, delta int64) uint64
//go:noescape
-//go:linkname xadduintptr runtime.xadd64
-func xadduintptr(ptr *uintptr, delta uintptr) uintptr
+func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
//go:noescape
-func xchg(ptr *uint32, new uint32) uint32
+func Xchg(ptr *uint32, new uint32) uint32
//go:noescape
-func xchg64(ptr *uint64, new uint64) uint64
+func Xchg64(ptr *uint64, new uint64) uint64
//go:noescape
-func xchguintptr(ptr *uintptr, new uintptr) uintptr
+func Xchguintptr(ptr *uintptr, new uintptr) uintptr
//go:noescape
-func atomicload(ptr *uint32) uint32
+func Load(ptr *uint32) uint32
//go:noescape
-func atomicload64(ptr *uint64) uint64
+func Load64(ptr *uint64) uint64
//go:noescape
-func atomicloadp(ptr unsafe.Pointer) unsafe.Pointer
+func Loadp(ptr unsafe.Pointer) unsafe.Pointer
//go:nosplit
-func atomicor8(addr *uint8, v uint8) {
+func Or8(addr *uint8, v uint8) {
// TODO(dfc) implement this in asm.
// Align down to 4 bytes and use 32-bit CAS.
uaddr := uintptr(unsafe.Pointer(addr))
word := uint32(v) << ((uaddr & 3) * 8) // little endian
for {
old := *addr32
- if cas(addr32, old, old|word) {
+ if Cas(addr32, old, old|word) {
return
}
}
}
//go:nosplit
-func atomicand8(addr *uint8, v uint8) {
+func And8(addr *uint8, v uint8) {
// TODO(dfc) implement this in asm.
// Align down to 4 bytes and use 32-bit CAS.
uaddr := uintptr(unsafe.Pointer(addr))
word |= ^mask
for {
old := *addr32
- if cas(addr32, old, old&word) {
+ if Cas(addr32, old, old&word) {
return
}
}
}
//go:noescape
-func cas64(ptr *uint64, old, new uint64) bool
+func Cas64(ptr *uint64, old, new uint64) bool
//go:noescape
-func atomicstore(ptr *uint32, val uint32)
+func Store(ptr *uint32, val uint32)
//go:noescape
-func atomicstore64(ptr *uint64, val uint64)
+func Store64(ptr *uint64, val uint64)
// NO go:noescape annotation; see atomic_pointer.go.
-func atomicstorep1(ptr unsafe.Pointer, val unsafe.Pointer)
+func Storep1(ptr unsafe.Pointer, val unsafe.Pointer)
--- /dev/null
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// uint32 runtime∕internal∕atomic·Load(uint32 volatile* addr)
+TEXT ·Load(SB),NOSPLIT,$-8-12
+ MOVD ptr+0(FP), R0
+ LDARW (R0), R0
+ MOVW R0, ret+8(FP)
+ RET
+
+// uint64 runtime∕internal∕atomic·Load64(uint64 volatile* addr)
+TEXT ·Load64(SB),NOSPLIT,$-8-16
+ MOVD ptr+0(FP), R0
+ LDAR (R0), R0
+ MOVD R0, ret+8(FP)
+ RET
+
+// void *runtime∕internal∕atomic·Loadp(void *volatile *addr)
+TEXT ·Loadp(SB),NOSPLIT,$-8-16
+ MOVD ptr+0(FP), R0
+ LDAR (R0), R0
+ MOVD R0, ret+8(FP)
+ RET
+
+TEXT runtime∕internal∕atomic·Storep1(SB), NOSPLIT, $0-16
+ B runtime∕internal∕atomic·Store64(SB)
+
+TEXT runtime∕internal∕atomic·Store(SB), NOSPLIT, $0-12
+ MOVD ptr+0(FP), R0
+ MOVW val+8(FP), R1
+ STLRW R1, (R0)
+ RET
+
+TEXT runtime∕internal∕atomic·Store64(SB), NOSPLIT, $0-16
+ MOVD ptr+0(FP), R0
+ MOVD val+8(FP), R1
+ STLR R1, (R0)
+ RET
+
+TEXT runtime∕internal∕atomic·Xchg(SB), NOSPLIT, $0-20
+again:
+ MOVD ptr+0(FP), R0
+ MOVW new+8(FP), R1
+ LDAXRW (R0), R2
+ STLXRW R1, (R0), R3
+ CBNZ R3, again
+ MOVW R2, ret+16(FP)
+ RET
+
+TEXT runtime∕internal∕atomic·Xchg64(SB), NOSPLIT, $0-24
+again:
+ MOVD ptr+0(FP), R0
+ MOVD new+8(FP), R1
+ LDAXR (R0), R2
+ STLXR R1, (R0), R3
+ CBNZ R3, again
+ MOVD R2, ret+16(FP)
+ RET
+
+// bool runtime∕internal∕atomic·Cas64(uint64 *ptr, uint64 old, uint64 new)
+// Atomically:
+// if(*val == old){
+// *val = new;
+// return 1;
+// } else {
+// return 0;
+// }
+TEXT runtime∕internal∕atomic·Cas64(SB), NOSPLIT, $0-25
+ MOVD ptr+0(FP), R0
+ MOVD old+8(FP), R1
+ MOVD new+16(FP), R2
+again:
+ LDAXR (R0), R3
+ CMP R1, R3
+ BNE ok
+ STLXR R2, (R0), R3
+ CBNZ R3, again
+ok:
+ CSET EQ, R0
+ MOVB R0, ret+24(FP)
+ RET
+
+// uint32 Xadd(uint32 volatile *ptr, int32 delta)
+// Atomically:
+// *val += delta;
+// return *val;
+TEXT runtime∕internal∕atomic·Xadd(SB), NOSPLIT, $0-20
+again:
+ MOVD ptr+0(FP), R0
+ MOVW delta+8(FP), R1
+ LDAXRW (R0), R2
+ ADDW R2, R1, R2
+ STLXRW R2, (R0), R3
+ CBNZ R3, again
+ MOVW R2, ret+16(FP)
+ RET
+
+TEXT runtime∕internal∕atomic·Xadd64(SB), NOSPLIT, $0-24
+again:
+ MOVD ptr+0(FP), R0
+ MOVD delta+8(FP), R1
+ LDAXR (R0), R2
+ ADD R2, R1, R2
+ STLXR R2, (R0), R3
+ CBNZ R3, again
+ MOVD R2, ret+16(FP)
+ RET
+
+TEXT runtime∕internal∕atomic·Xchguintptr(SB), NOSPLIT, $0-24
+ B runtime∕internal∕atomic·Xchg64(SB)
--- /dev/null
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ppc64 ppc64le
+
+package atomic
+
+import "unsafe"
+
+//go:noescape
+func Xadd(ptr *uint32, delta int32) uint32
+
+//go:noescape
+func Xadd64(ptr *uint64, delta int64) uint64
+
+//go:noescape
+func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
+
+//go:noescape
+func Xchg(ptr *uint32, new uint32) uint32
+
+//go:noescape
+func Xchg64(ptr *uint64, new uint64) uint64
+
+//go:noescape
+func Xchguintptr(ptr *uintptr, new uintptr) uintptr
+
+//go:noescape
+func Load(ptr *uint32) uint32
+
+//go:noescape
+func Load64(ptr *uint64) uint64
+
+//go:noescape
+func Loadp(ptr unsafe.Pointer) unsafe.Pointer
+
+//go:noescape
+func And8(ptr *uint8, val uint8)
+
+//go:noescape
+func Or8(ptr *uint8, val uint8)
+
+// NOTE: Do not add atomicxor8 (XOR is not idempotent).
+
+//go:noescape
+func Cas64(ptr *uint64, old, new uint64) bool
+
+//go:noescape
+func Store(ptr *uint32, val uint32)
+
+//go:noescape
+func Store64(ptr *uint64, val uint64)
+
+// NO go:noescape annotation; see atomic_pointer.go.
+func Storep1(ptr unsafe.Pointer, val unsafe.Pointer)
--- /dev/null
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ppc64 ppc64le
+
+#include "textflag.h"
+
+// uint32 runtime∕internal∕atomic·Load(uint32 volatile* addr)
+TEXT ·Load(SB),NOSPLIT|NOFRAME,$-8-12
+ MOVD addr+0(FP), R3
+ SYNC
+ MOVWZ 0(R3), R3
+ CMPW R3, R3, CR7
+ BC 4, 30, 1(PC) // bne- cr7,0x4
+ ISYNC
+ MOVW R3, ret+8(FP)
+ RET
+
+// uint64 runtime∕internal∕atomic·Load64(uint64 volatile* addr)
+TEXT ·Load64(SB),NOSPLIT|NOFRAME,$-8-16
+ MOVD addr+0(FP), R3
+ SYNC
+ MOVD 0(R3), R3
+ CMP R3, R3, CR7
+ BC 4, 30, 1(PC) // bne- cr7,0x4
+ ISYNC
+ MOVD R3, ret+8(FP)
+ RET
+
+// void *runtime∕internal∕atomic·Loadp(void *volatile *addr)
+TEXT ·Loadp(SB),NOSPLIT|NOFRAME,$-8-16
+ MOVD addr+0(FP), R3
+ SYNC
+ MOVD 0(R3), R3
+ CMP R3, R3, CR7
+ BC 4, 30, 1(PC) // bne- cr7,0x4
+ ISYNC
+ MOVD R3, ret+8(FP)
+ RET
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package runtime_test
+package atomic_test
import (
"runtime"
+ "runtime/internal/atomic"
"testing"
"unsafe"
)
inc := uintptr(100)
total := uintptr(0)
runParallel(N, iter, func() {
- runtime.Xadduintptr(&total, inc)
+ atomic.Xadduintptr(&total, inc)
})
if want := uintptr(N * iter * inc); want != total {
t.Fatalf("xadduintpr error, want %d, got %d", want, total)
}
total = 0
runParallel(N, iter, func() {
- runtime.Xadduintptr(&total, inc)
- runtime.Xadduintptr(&total, uintptr(-int64(inc)))
+ atomic.Xadduintptr(&total, inc)
+ atomic.Xadduintptr(&total, uintptr(-int64(inc)))
})
if total != 0 {
t.Fatalf("xadduintpr total error, want %d, got %d", 0, total)
// Tests that xadduintptr correctly updates 64-bit values. The place where
// we actually do so is mstats.go, functions mSysStat{Inc,Dec}.
func TestXadduintptrOnUint64(t *testing.T) {
- if runtime.BigEndian != 0 {
+ /* if runtime.BigEndian != 0 {
// On big endian architectures, we never use xadduintptr to update
// 64-bit values and hence we skip the test. (Note that functions
// mSysStat{Inc,Dec} in mstats.go have explicit checks for
// big-endianness.)
return
- }
+ }*/
const inc = 100
val := uint64(0)
- runtime.Xadduintptr((*uintptr)(unsafe.Pointer(&val)), inc)
+ atomic.Xadduintptr((*uintptr)(unsafe.Pointer(&val)), inc)
if inc != val {
t.Fatalf("xadduintptr should increase lower-order bits, want %d, got %d", inc, val)
}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomic
+
+import "unsafe"
+
+//go:noescape
+func Cas(ptr *uint32, old, new uint32) bool
+
+// NO go:noescape annotation; see atomic_pointer.go.
+func Casp1(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool
+
+func nop() // call to prevent inlining of function body
+
+//go:noescape
+func Casuintptr(ptr *uintptr, old, new uintptr) bool
+
+//go:noescape
+func Storeuintptr(ptr *uintptr, new uintptr)
+
+//go:noescape
+func Loaduintptr(ptr *uintptr) uintptr
+
+//go:noescape
+func Loaduint(ptr *uint) uint
+
+// TODO(matloob): Should these functions have the go:noescape annotation?
+
+//go:noescape
+func Loadint64(ptr *int64) int64
+
+//go:noescape
+func Xaddint64(ptr *int64, delta int64) int64
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+TEXT runtime∕internal∕atomic·Cas(SB),NOSPLIT,$0
+ B runtime∕internal∕atomic·armcas(SB)
+
+TEXT runtime∕internal∕atomic·Casp1(SB),NOSPLIT,$0
+ B runtime∕internal∕atomic·Cas(SB)
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// TODO(minux): this is only valid for ARMv6+
+// bool armcas(int32 *val, int32 old, int32 new)
+// Atomically:
+// if(*val == old){
+// *val = new;
+// return 1;
+// }else
+// return 0;
+TEXT runtime∕internal∕atomic·Cas(SB),NOSPLIT,$0
+ B runtime∕internal∕atomic·armcas(SB)
+
+TEXT runtime∕internal∕atomic·Casp1(SB),NOSPLIT,$0
+ B runtime∕internal∕atomic·Cas(SB)
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// Use kernel version instead of native armcas in asm_arm.s.
+// See ../../../sync/atomic/asm_linux_arm.s for details.
+TEXT cas<>(SB),NOSPLIT,$0
+ MOVW $0xffff0fc0, R15 // R15 is hardware PC.
+
+TEXT runtime∕internal∕atomic·Cas(SB),NOSPLIT,$0
+ MOVW ptr+0(FP), R2
+ MOVW old+4(FP), R0
+loop:
+ MOVW new+8(FP), R1
+ BL cas<>(SB)
+ BCC check
+ MOVW $1, R0
+ MOVB R0, ret+12(FP)
+ RET
+check:
+ // Kernel lies; double-check.
+ MOVW ptr+0(FP), R2
+ MOVW old+4(FP), R0
+ MOVW 0(R2), R3
+ CMP R0, R3
+ BEQ loop
+ MOVW $0, R0
+ MOVB R0, ret+12(FP)
+ RET
+
+TEXT runtime∕internal∕atomic·Casp1(SB),NOSPLIT,$0
+ B runtime∕internal∕atomic·Cas(SB)
+
+// As for cas, memory barriers are complicated on ARM, but the kernel
+// provides a user helper. ARMv5 does not support SMP and has no
+// memory barrier instruction at all. ARMv6 added SMP support and has
+// a memory barrier, but it requires writing to a coprocessor
+// register. ARMv7 introduced the DMB instruction, but it's expensive
+// even on single-core devices. The kernel helper takes care of all of
+// this for us.
\ No newline at end of file
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+TEXT runtime∕internal∕atomic·Casp(SB),NOSPLIT,$0
+ B runtime·cas(SB)
+
+// This is only valid for ARMv6+, however, NaCl/ARM is only defined
+// for ARMv7A anyway.
+TEXT runtime∕internal∕atomic·Cas(SB),NOSPLIT,$0
+ B runtime∕internal∕atomic·armcas(SB)
+
+TEXT runtime∕internal∕atomic·Casp1(SB),NOSPLIT,$0
+ B runtime∕internal∕atomic·Cas(SB)
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// TODO(minux): this is only valid for ARMv6+
+// bool Armcas(int32 *val, int32 old, int32 new)
+// Atomically:
+// if(*val == old){
+// *val = new;
+// return 1;
+// }else
+// return 0;
+TEXT runtime∕internal∕atomic·Cas(SB),NOSPLIT,$0
+ B runtime∕internal∕atomic·armcas(SB)
+
+TEXT runtime∕internal∕atomic·Casp1(SB),NOSPLIT,$0
+ B runtime∕internal∕atomic·Cas(SB)
+
+
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+TEXT runtime∕internal∕atomic·Cas(SB),NOSPLIT,$0
+ B runtime∕internal∕atomic·armcas(SB)
+
+TEXT runtime∕internal∕atomic·Casp1(SB),NOSPLIT,$0
+ B runtime∕internal∕atomic·Cas(SB)
--- /dev/null
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file defines flags attached to various functions
+// and data objects. The compilers, assemblers, and linker must
+// all agree on these values.
+
+// Don't profile the marked routine. This flag is deprecated.
+#define NOPROF 1
+// It is ok for the linker to get multiple of these symbols. It will
+// pick one of the duplicates to use.
+#define DUPOK 2
+// Don't insert stack check preamble.
+#define NOSPLIT 4
+// Put this data in a read-only section.
+#define RODATA 8
+// This data contains no pointers.
+#define NOPTR 16
+// This is a wrapper function and should not count as disabling 'recover'.
+#define WRAPPER 32
+// This function uses its incoming context register.
+#define NEEDCTXT 64
+// Allocate a word of thread local storage and store the offset from the
+// thread local base to the thread local storage in this variable.
+#define TLSBSS 256
+// Do not insert instructions to allocate a stack frame for this function.
+// Only valid on functions that declare a frame size of 0.
+// TODO(mwhudson): only implemented for ppc64x at present.
+#define NOFRAME 512
package runtime
-import "unsafe"
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
func lfstackpush(head *uint64, node *lfnode) {
node.pushcnt++
throw("lfstackpush")
}
for {
- old := atomicload64(head)
+ old := atomic.Load64(head)
node.next = old
- if cas64(head, old, new) {
+ if atomic.Cas64(head, old, new) {
break
}
}
func lfstackpop(head *uint64) unsafe.Pointer {
for {
- old := atomicload64(head)
+ old := atomic.Load64(head)
if old == 0 {
return nil
}
node, _ := lfstackUnpack(old)
- next := atomicload64(&node.next)
- if cas64(head, old, next) {
+ next := atomic.Load64(&node.next)
+ if atomic.Cas64(head, old, next) {
return unsafe.Pointer(node)
}
}
package runtime
-import "unsafe"
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
// This implementation depends on OS-specific implementations of
//
gp.m.locks++
// Speculative grab for lock.
- v := xchg(key32(&l.key), mutex_locked)
+ v := atomic.Xchg(key32(&l.key), mutex_locked)
if v == mutex_unlocked {
return
}
// Try for lock, spinning.
for i := 0; i < spin; i++ {
for l.key == mutex_unlocked {
- if cas(key32(&l.key), mutex_unlocked, wait) {
+ if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
return
}
}
// Try for lock, rescheduling.
for i := 0; i < passive_spin; i++ {
for l.key == mutex_unlocked {
- if cas(key32(&l.key), mutex_unlocked, wait) {
+ if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
return
}
}
}
// Sleep.
- v = xchg(key32(&l.key), mutex_sleeping)
+ v = atomic.Xchg(key32(&l.key), mutex_sleeping)
if v == mutex_unlocked {
return
}
}
func unlock(l *mutex) {
- v := xchg(key32(&l.key), mutex_unlocked)
+ v := atomic.Xchg(key32(&l.key), mutex_unlocked)
if v == mutex_unlocked {
throw("unlock of unlocked lock")
}
}
func notewakeup(n *note) {
- old := xchg(key32(&n.key), 1)
+ old := atomic.Xchg(key32(&n.key), 1)
if old != 0 {
print("notewakeup - double wakeup (", old, ")\n")
throw("notewakeup - double wakeup")
if gp != gp.m.g0 {
throw("notesleep not on g0")
}
- for atomicload(key32(&n.key)) == 0 {
+ for atomic.Load(key32(&n.key)) == 0 {
gp.m.blocked = true
futexsleep(key32(&n.key), 0, -1)
gp.m.blocked = false
gp := getg()
if ns < 0 {
- for atomicload(key32(&n.key)) == 0 {
+ for atomic.Load(key32(&n.key)) == 0 {
gp.m.blocked = true
futexsleep(key32(&n.key), 0, -1)
gp.m.blocked = false
return true
}
- if atomicload(key32(&n.key)) != 0 {
+ if atomic.Load(key32(&n.key)) != 0 {
return true
}
gp.m.blocked = true
futexsleep(key32(&n.key), 0, ns)
gp.m.blocked = false
- if atomicload(key32(&n.key)) != 0 {
+ if atomic.Load(key32(&n.key)) != 0 {
break
}
now := nanotime()
}
ns = deadline - now
}
- return atomicload(key32(&n.key)) != 0
+ return atomic.Load(key32(&n.key)) != 0
}
func notetsleep(n *note, ns int64) bool {
package runtime
-import "unsafe"
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
// This implementation depends on OS-specific implementations of
//
gp.m.locks++
// Speculative grab for lock.
- if casuintptr(&l.key, 0, locked) {
+ if atomic.Casuintptr(&l.key, 0, locked) {
return
}
if gp.m.waitsema == 0 {
}
Loop:
for i := 0; ; i++ {
- v := atomicloaduintptr(&l.key)
+ v := atomic.Loaduintptr(&l.key)
if v&locked == 0 {
// Unlocked. Try to lock.
- if casuintptr(&l.key, v, v|locked) {
+ if atomic.Casuintptr(&l.key, v, v|locked) {
return
}
i = 0
// Queue this M.
for {
gp.m.nextwaitm = v &^ locked
- if casuintptr(&l.key, v, uintptr(unsafe.Pointer(gp.m))|locked) {
+ if atomic.Casuintptr(&l.key, v, uintptr(unsafe.Pointer(gp.m))|locked) {
break
}
- v = atomicloaduintptr(&l.key)
+ v = atomic.Loaduintptr(&l.key)
if v&locked == 0 {
continue Loop
}
gp := getg()
var mp *m
for {
- v := atomicloaduintptr(&l.key)
+ v := atomic.Loaduintptr(&l.key)
if v == locked {
- if casuintptr(&l.key, locked, 0) {
+ if atomic.Casuintptr(&l.key, locked, 0) {
break
}
} else {
// Other M's are waiting for the lock.
// Dequeue an M.
mp = (*m)(unsafe.Pointer(v &^ locked))
- if casuintptr(&l.key, v, mp.nextwaitm) {
+ if atomic.Casuintptr(&l.key, v, mp.nextwaitm) {
// Dequeued an M. Wake it.
semawakeup(mp)
break
func notewakeup(n *note) {
var v uintptr
for {
- v = atomicloaduintptr(&n.key)
- if casuintptr(&n.key, v, locked) {
+ v = atomic.Loaduintptr(&n.key)
+ if atomic.Casuintptr(&n.key, v, locked) {
break
}
}
if gp.m.waitsema == 0 {
gp.m.waitsema = semacreate()
}
- if !casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
+ if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
// Must be locked (got wakeup).
if n.key != locked {
throw("notesleep - waitm out of sync")
gp = getg()
// Register for wakeup on n->waitm.
- if !casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
+ if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
// Must be locked (got wakeup).
if n.key != locked {
throw("notetsleep - waitm out of sync")
// so that any notewakeup racing with the return does not
// try to grant us the semaphore when we don't expect it.
for {
- v := atomicloaduintptr(&n.key)
+ v := atomic.Loaduintptr(&n.key)
switch v {
case uintptr(unsafe.Pointer(gp.m)):
// No wakeup yet; unregister if possible.
- if casuintptr(&n.key, v, 0) {
+ if atomic.Casuintptr(&n.key, v, 0) {
return false
}
case locked:
package runtime
-import "unsafe"
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
const (
bitPointer = 1 << 0
// Might be racing with other updates, so use atomic update always.
// We used to be clever here and use a non-atomic update in certain
// cases, but it's not worth the risk.
- atomicor8(h.bitp, bitMarked<<h.shift)
+ atomic.Or8(h.bitp, bitMarked<<h.shift)
}
// setMarkedNonAtomic sets the marked bit in the heap bits, non-atomically.
// h must describe the initial word of the object.
func (h heapBits) setCheckmarked(size uintptr) {
if size == ptrSize {
- atomicor8(h.bitp, bitPointer<<h.shift)
+ atomic.Or8(h.bitp, bitPointer<<h.shift)
return
}
- atomicor8(h.bitp, bitMarked<<(heapBitsShift+h.shift))
+ atomic.Or8(h.bitp, bitMarked<<(heapBitsShift+h.shift))
}
// heapBitsBulkBarrier executes writebarrierptr_nostore
if gcphase == _GCoff {
*h.bitp |= bitPointer << h.shift
} else {
- atomicor8(h.bitp, bitPointer<<h.shift)
+ atomic.Or8(h.bitp, bitPointer<<h.shift)
}
} else {
// 2-element slice of pointer.
if gcphase == _GCoff {
*h.bitp |= (bitPointer | bitPointer<<heapBitsShift) << h.shift
} else {
- atomicor8(h.bitp, (bitPointer|bitPointer<<heapBitsShift)<<h.shift)
+ atomic.Or8(h.bitp, (bitPointer|bitPointer<<heapBitsShift)<<h.shift)
}
}
return
if gcphase == _GCoff {
*h.bitp |= uint8(hb << h.shift)
} else {
- atomicor8(h.bitp, uint8(hb<<h.shift))
+ atomic.Or8(h.bitp, uint8(hb<<h.shift))
}
return
}
if gcphase == _GCoff {
*hbitp |= uint8(hb)
} else {
- atomicor8(hbitp, uint8(hb))
+ atomic.Or8(hbitp, uint8(hb))
}
hbitp = subtract1(hbitp)
if w += 2; w >= nw {
if gcphase == _GCoff {
*hbitp = *hbitp&^(bitPointer|bitMarked|(bitPointer|bitMarked)<<heapBitsShift) | uint8(hb)
} else {
- atomicand8(hbitp, ^uint8(bitPointer|bitMarked|(bitPointer|bitMarked)<<heapBitsShift))
- atomicor8(hbitp, uint8(hb))
+ atomic.And8(hbitp, ^uint8(bitPointer|bitMarked|(bitPointer|bitMarked)<<heapBitsShift))
+ atomic.Or8(hbitp, uint8(hb))
}
}
package runtime
+import "runtime/internal/atomic"
+
// Central list of free objects of a given size.
type mcentral struct {
lock mutex
retry:
var s *mspan
for s = c.nonempty.first; s != nil; s = s.next {
- if s.sweepgen == sg-2 && cas(&s.sweepgen, sg-2, sg-1) {
+ if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
mSpanList_Remove(&c.nonempty, s)
mSpanList_InsertBack(&c.empty, s)
unlock(&c.lock)
}
for s = c.empty.first; s != nil; s = s.next {
- if s.sweepgen == sg-2 && cas(&s.sweepgen, sg-2, sg-1) {
+ if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
// we have an empty span that requires sweeping,
// sweep it and see if we can free some space in it
mSpanList_Remove(&c.empty, s)
if !mSpan_InList(s) {
throw("can't preserve unlinked span")
}
- atomicstore(&s.sweepgen, mheap_.sweepgen)
+ atomic.Store(&s.sweepgen, mheap_.sweepgen)
return false
}
// the span may be used in an MCache, so it must come after the
// linked list operations above (actually, just after the
// lock of c above.)
- atomicstore(&s.sweepgen, mheap_.sweepgen)
+ atomic.Store(&s.sweepgen, mheap_.sweepgen)
if s.ref != 0 {
unlock(&c.lock)
package runtime
-import "unsafe"
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
type finblock struct {
alllink *finblock
func createfing() {
// start the finalizer goroutine exactly once
- if fingCreate == 0 && cas(&fingCreate, 0, 1) {
+ if fingCreate == 0 && atomic.Cas(&fingCreate, 0, 1) {
go runfinq()
}
}
package runtime
-import "unsafe"
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
const (
_DebugGC = 0
//go:nosplit
func setGCPhase(x uint32) {
- atomicstore(&gcphase, x)
+ atomic.Store(&gcphase, x)
writeBarrierEnabled = gcphase == _GCmark || gcphase == _GCmarktermination
}
decIfPositive := func(ptr *int64) bool {
if *ptr > 0 {
- if xaddint64(ptr, -1) >= 0 {
+ if atomic.Xaddint64(ptr, -1) >= 0 {
return true
}
// We lost a race
- xaddint64(ptr, +1)
+ atomic.Xaddint64(ptr, +1)
}
return false
}
timeUsed := c.fractionalMarkTime + gcForcePreemptNS
if then > 0 && float64(timeUsed)/float64(then) > c.fractionalUtilizationGoal {
// Nope, we'd overshoot the utilization goal
- xaddint64(&c.fractionalMarkWorkersNeeded, +1)
+ atomic.Xaddint64(&c.fractionalMarkWorkersNeeded, +1)
return nil
}
_p_.gcMarkWorkerMode = gcMarkWorkerFractionalMode
// black invariant. Enable mutator assists to
// put back-pressure on fast allocating
// mutators.
- atomicstore(&gcBlackenEnabled, 1)
+ atomic.Store(&gcBlackenEnabled, 1)
// Assists and workers can start the moment we start
// the world.
//
// TODO(austin): Should dedicated workers keep an eye on this
// and exit gcDrain promptly?
- xaddint64(&gcController.dedicatedMarkWorkersNeeded, -0xffffffff)
- xaddint64(&gcController.fractionalMarkWorkersNeeded, -0xffffffff)
+ atomic.Xaddint64(&gcController.dedicatedMarkWorkersNeeded, -0xffffffff)
+ atomic.Xaddint64(&gcController.fractionalMarkWorkersNeeded, -0xffffffff)
if !gcBlackenPromptly {
// Transition from mark 1 to mark 2.
// Prevent completion of mark 2 until we've flushed
// cached workbufs.
- xadd(&work.nwait, -1)
+ atomic.Xadd(&work.nwait, -1)
// Rescan global data and BSS. There may still work
// workers running at this point, so bump "jobs" down
// before "next" so they won't try running root jobs
// until we set next.
- atomicstore(&work.markrootJobs, uint32(fixedRootCount+work.nDataRoots+work.nBSSRoots))
- atomicstore(&work.markrootNext, fixedRootCount)
+ atomic.Store(&work.markrootJobs, uint32(fixedRootCount+work.nDataRoots+work.nBSSRoots))
+ atomic.Store(&work.markrootNext, fixedRootCount)
// GC is set up for mark 2. Let Gs blocked on the
// transition lock go while we flush caches.
})
// Now we can start up mark 2 workers.
- xaddint64(&gcController.dedicatedMarkWorkersNeeded, 0xffffffff)
- xaddint64(&gcController.fractionalMarkWorkersNeeded, 0xffffffff)
+ atomic.Xaddint64(&gcController.dedicatedMarkWorkersNeeded, 0xffffffff)
+ atomic.Xaddint64(&gcController.fractionalMarkWorkersNeeded, 0xffffffff)
- incnwait := xadd(&work.nwait, +1)
+ incnwait := atomic.Xadd(&work.nwait, +1)
if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
// This recursion is safe because the call
// can't take this same "if" branch.
func gcMarkTermination() {
// World is stopped.
// Start marktermination which includes enabling the write barrier.
- atomicstore(&gcBlackenEnabled, 0)
+ atomic.Store(&gcBlackenEnabled, 0)
gcBlackenPromptly = false
setGCPhase(_GCmarktermination)
now, unixNow := nanotime(), unixnanotime()
work.pauseNS += now - work.pauseStart
work.tEnd = now
- atomicstore64(&memstats.last_gc, uint64(unixNow)) // must be Unix time to make sense to user
+ atomic.Store64(&memstats.last_gc, uint64(unixNow)) // must be Unix time to make sense to user
memstats.pause_ns[memstats.numgc%uint32(len(memstats.pause_ns))] = uint64(work.pauseNS)
memstats.pause_end[memstats.numgc%uint32(len(memstats.pause_end))] = uint64(unixNow)
memstats.pause_total_ns += uint64(work.pauseNS)
startTime := nanotime()
- decnwait := xadd(&work.nwait, -1)
+ decnwait := atomic.Xadd(&work.nwait, -1)
if decnwait == work.nproc {
println("runtime: work.nwait=", decnwait, "work.nproc=", work.nproc)
throw("work.nwait was > work.nproc")
duration := nanotime() - startTime
switch p.gcMarkWorkerMode {
case gcMarkWorkerDedicatedMode:
- xaddint64(&gcController.dedicatedMarkTime, duration)
- xaddint64(&gcController.dedicatedMarkWorkersNeeded, 1)
+ atomic.Xaddint64(&gcController.dedicatedMarkTime, duration)
+ atomic.Xaddint64(&gcController.dedicatedMarkWorkersNeeded, 1)
case gcMarkWorkerFractionalMode:
- xaddint64(&gcController.fractionalMarkTime, duration)
- xaddint64(&gcController.fractionalMarkWorkersNeeded, 1)
+ atomic.Xaddint64(&gcController.fractionalMarkTime, duration)
+ atomic.Xaddint64(&gcController.fractionalMarkWorkersNeeded, 1)
case gcMarkWorkerIdleMode:
- xaddint64(&gcController.idleMarkTime, duration)
+ atomic.Xaddint64(&gcController.idleMarkTime, duration)
}
// Was this the last worker and did we run out
// of work?
- incnwait := xadd(&work.nwait, +1)
+ incnwait := atomic.Xadd(&work.nwait, +1)
if incnwait > work.nproc {
println("runtime: p.gcMarkWorkerMode=", p.gcMarkWorkerMode,
"work.nwait=", incnwait, "work.nproc=", work.nproc)
if p != nil && !p.gcw.empty() {
return true
}
- if atomicload64(&work.full) != 0 {
+ if atomic.Load64(&work.full) != 0 {
return true // global work available
}
if work.markrootNext < work.markrootJobs {
}
nproc := work.nproc // work.nproc can change right after we increment work.ndone
- if xadd(&work.ndone, +1) == nproc-1 {
+ if atomic.Xadd(&work.ndone, +1) == nproc-1 {
notewakeup(&work.alldone)
}
_g_.m.traceback = 0
package runtime
-import "unsafe"
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
const (
fixedRootFinalizers = iota
// roots they create during the concurrent phase will be
// scanned during mark termination. During mark termination,
// allglen isn't changing, so we'll scan all Gs.
- work.nStackRoots = int(atomicloaduintptr(&allglen))
+ work.nStackRoots = int(atomic.Loaduintptr(&allglen))
work.markrootNext = 0
work.markrootJobs = uint32(fixedRootCount + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots)
// will just cause steals to fail until credit is accumulated
// again, so in the long run it doesn't really matter, but we
// do have to handle the negative credit case.
- bgScanCredit := atomicloadint64(&gcController.bgScanCredit)
+ bgScanCredit := atomic.Loadint64(&gcController.bgScanCredit)
stolen := int64(0)
if bgScanCredit > 0 {
if bgScanCredit < scanWork {
stolen = scanWork
gp.gcAssistBytes += debtBytes
}
- xaddint64(&gcController.bgScanCredit, -stolen)
+ atomic.Xaddint64(&gcController.bgScanCredit, -stolen)
scanWork -= stolen
// Perform assist work
completed := false
systemstack(func() {
- if atomicload(&gcBlackenEnabled) == 0 {
+ if atomic.Load(&gcBlackenEnabled) == 0 {
// The gcBlackenEnabled check in malloc races with the
// store that clears it but an atomic check in every malloc
// would be a performance hit.
// just measure start and end time.
startTime := nanotime()
- decnwait := xadd(&work.nwait, -1)
+ decnwait := atomic.Xadd(&work.nwait, -1)
if decnwait == work.nproc {
println("runtime: work.nwait =", decnwait, "work.nproc=", work.nproc)
throw("nwait > work.nprocs")
// If this is the last worker and we ran out of work,
// signal a completion point.
- incnwait := xadd(&work.nwait, +1)
+ incnwait := atomic.Xadd(&work.nwait, +1)
if incnwait > work.nproc {
println("runtime: work.nwait=", incnwait,
"work.nproc=", work.nproc,
_p_ := gp.m.p.ptr()
_p_.gcAssistTime += duration
if _p_.gcAssistTime > gcAssistTimeSlack {
- xaddint64(&gcController.assistTime, _p_.gcAssistTime)
+ atomic.Xaddint64(&gcController.assistTime, _p_.gcAssistTime)
_p_.gcAssistTime = 0
}
})
// likely path if we completed above. We do this
// under the lock to prevent a GC cycle from ending
// between this check and queuing the assist.
- if atomicload(&gcBlackenEnabled) == 0 {
+ if atomic.Load(&gcBlackenEnabled) == 0 {
unlock(&work.assistQueue.lock)
return
}
// the queue, but can still back out. This avoids a
// race in case background marking has flushed more
// credit since we checked above.
- if atomicloadint64(&gcController.bgScanCredit) > 0 {
+ if atomic.Loadint64(&gcController.bgScanCredit) > 0 {
work.assistQueue.head = oldHead
work.assistQueue.tail = oldTail
if oldTail != 0 {
// small window here where an assist may add itself to
// the blocked queue and park. If that happens, we'll
// just get it on the next flush.
- xaddint64(&gcController.bgScanCredit, scanWork)
+ atomic.Xaddint64(&gcController.bgScanCredit, scanWork)
return
}
if scanBytes > 0 {
// Convert from scan bytes back to work.
scanWork = int64(float64(scanBytes) * gcController.assistWorkPerByte)
- xaddint64(&gcController.bgScanCredit, scanWork)
+ atomic.Xaddint64(&gcController.bgScanCredit, scanWork)
}
unlock(&work.assistQueue.lock)
}
// Drain root marking jobs.
if work.markrootNext < work.markrootJobs {
for blocking || !gp.preempt {
- job := xadd(&work.markrootNext, +1) - 1
+ job := atomic.Xadd(&work.markrootNext, +1) - 1
if job >= work.markrootJobs {
break
}
// account if we've accumulated enough locally so
// mutator assists can draw on it.
if gcw.scanWork >= gcCreditSlack {
- xaddint64(&gcController.scanWork, gcw.scanWork)
+ atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
if flushBgCredit {
gcFlushBgCredit(gcw.scanWork - initScanWork)
initScanWork = 0
// Flush remaining scan work credit.
if gcw.scanWork > 0 {
- xaddint64(&gcController.scanWork, gcw.scanWork)
+ atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
if flushBgCredit {
gcFlushBgCredit(gcw.scanWork - initScanWork)
}
// Flush background scan work credit.
if gcw.scanWork >= gcCreditSlack {
- xaddint64(&gcController.scanWork, gcw.scanWork)
+ atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
workFlushed += gcw.scanWork
gcw.scanWork = 0
}
throw("gcmarknewobject called while doing checkmark")
}
heapBitsForAddr(obj).setMarked()
- xadd64(&work.bytesMarked, int64(size))
+ atomic.Xadd64(&work.bytesMarked, int64(size))
}
// Checkmarking
package runtime
-import "unsafe"
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
var sweep sweepdata
_g_.m.locks++
sg := mheap_.sweepgen
for {
- idx := xadd(&sweep.spanidx, 1) - 1
+ idx := atomic.Xadd(&sweep.spanidx, 1) - 1
if idx >= uint32(len(work.spans)) {
mheap_.sweepdone = 1
_g_.m.locks--
s.sweepgen = sg
continue
}
- if s.sweepgen != sg-2 || !cas(&s.sweepgen, sg-2, sg-1) {
+ if s.sweepgen != sg-2 || !atomic.Cas(&s.sweepgen, sg-2, sg-1) {
continue
}
npages := s.npages
}
sg := mheap_.sweepgen
- if atomicload(&s.sweepgen) == sg {
+ if atomic.Load(&s.sweepgen) == sg {
return
}
// The caller must be sure that the span is a MSpanInUse span.
- if cas(&s.sweepgen, sg-2, sg-1) {
+ if atomic.Cas(&s.sweepgen, sg-2, sg-1) {
mSpan_Sweep(s, false)
return
}
// unfortunate condition, and we don't have efficient means to wait
- for atomicload(&s.sweepgen) != sg {
+ for atomic.Load(&s.sweepgen) != sg {
osyield()
}
}
traceGCSweepStart()
}
- xadd64(&mheap_.pagesSwept, int64(s.npages))
+ atomic.Xadd64(&mheap_.pagesSwept, int64(s.npages))
cl := s.sizeclass
size := s.elemsize
print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
throw("MSpan_Sweep: bad span state after sweep")
}
- atomicstore(&s.sweepgen, sweepgen)
+ atomic.Store(&s.sweepgen, sweepgen)
}
if nfree > 0 {
c.local_nsmallfree[cl] += uintptr(nfree)
}
// Account for this span allocation.
- spanBytesAlloc := xadd64(&mheap_.spanBytesAlloc, int64(spanBytes))
+ spanBytesAlloc := atomic.Xadd64(&mheap_.spanBytesAlloc, int64(spanBytes))
// Fix debt if necessary.
pagesOwed := int64(mheap_.sweepPagesPerByte * float64(spanBytesAlloc))
- for pagesOwed-int64(atomicload64(&mheap_.pagesSwept)) > int64(callerSweepPages) {
+ for pagesOwed-int64(atomic.Load64(&mheap_.pagesSwept)) > int64(callerSweepPages) {
if gosweepone() == ^uintptr(0) {
mheap_.sweepPagesPerByte = 0
break
package runtime
-import "unsafe"
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
const (
_Debugwbufs = false // if true check wbufs consistency
// atomic becomes a problem, we should first try to
// dispose less and if necessary aggregate in a per-P
// counter.
- xadd64(&work.bytesMarked, int64(w.bytesMarked))
+ atomic.Xadd64(&work.bytesMarked, int64(w.bytesMarked))
w.bytesMarked = 0
}
if w.scanWork != 0 {
- xaddint64(&gcController.scanWork, w.scanWork)
+ atomic.Xaddint64(&gcController.scanWork, w.scanWork)
w.scanWork = 0
}
}
return b
}
- incnwait := xadd(&work.nwait, +1)
+ incnwait := atomic.Xadd(&work.nwait, +1)
if incnwait > work.nproc {
println("runtime: work.nwait=", incnwait, "work.nproc=", work.nproc)
throw("work.nwait > work.nproc")
}
for i := 0; ; i++ {
if work.full != 0 {
- decnwait := xadd(&work.nwait, -1)
+ decnwait := atomic.Xadd(&work.nwait, -1)
if decnwait == work.nproc {
println("runtime: work.nwait=", decnwait, "work.nproc=", work.nproc)
throw("work.nwait > work.nproc")
b.checknonempty()
return b
}
- incnwait := xadd(&work.nwait, +1)
+ incnwait := atomic.Xadd(&work.nwait, +1)
if incnwait > work.nproc {
println("runtime: work.nwait=", incnwait, "work.nproc=", work.nproc)
throw("work.nwait > work.nproc")
package runtime
-import "unsafe"
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
// Main malloc heap.
// The heap itself is the "free[]" and "large" arrays,
sg := mheap_.sweepgen
retry:
for s := list.first; s != nil; s = s.next {
- if s.sweepgen == sg-2 && cas(&s.sweepgen, sg-2, sg-1) {
+ if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
mSpanList_Remove(list, s)
// swept spans are at the end of the list
mSpanList_InsertBack(list, s)
if s != nil {
// Record span info, because gc needs to be
// able to map interior pointer to containing span.
- atomicstore(&s.sweepgen, h.sweepgen)
+ atomic.Store(&s.sweepgen, h.sweepgen)
s.state = _MSpanInUse
s.freelist = 0
s.ref = 0
for i := p; i < p+s.npages; i++ {
h_spans[i] = s
}
- atomicstore(&s.sweepgen, h.sweepgen)
+ atomic.Store(&s.sweepgen, h.sweepgen)
s.state = _MSpanInUse
h.pagesInUse += uint64(npage)
mHeap_FreeSpanLocked(h, s, false, true, 0)
package runtime
import (
+ "runtime/internal/atomic"
"unsafe"
)
}
}
- atomicstore64(&blockprofilerate, uint64(r))
+ atomic.Store64(&blockprofilerate, uint64(r))
}
func blockevent(cycles int64, skip int) {
if cycles <= 0 {
cycles = 1
}
- rate := int64(atomicload64(&blockprofilerate))
+ rate := int64(atomic.Load64(&blockprofilerate))
if rate <= 0 || (rate > cycles && int64(fastrand1())%rate > cycles) {
return
}
// Most clients should use the runtime/pprof package instead
// of calling ThreadCreateProfile directly.
func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
- first := (*m)(atomicloadp(unsafe.Pointer(&allm)))
+ first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
for mp := first; mp != nil; mp = mp.alllink {
n++
}
package runtime
-import "unsafe"
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
// Statistics.
// If you edit this structure, also edit type MemStats below.
//go:nosplit
func mSysStatInc(sysStat *uint64, n uintptr) {
if _BigEndian != 0 {
- xadd64(sysStat, int64(n))
+ atomic.Xadd64(sysStat, int64(n))
return
}
- if val := xadduintptr((*uintptr)(unsafe.Pointer(sysStat)), n); val < n {
+ if val := atomic.Xadduintptr((*uintptr)(unsafe.Pointer(sysStat)), n); val < n {
print("runtime: stat overflow: val ", val, ", n ", n, "\n")
exit(2)
}
//go:nosplit
func mSysStatDec(sysStat *uint64, n uintptr) {
if _BigEndian != 0 {
- xadd64(sysStat, -int64(n))
+ atomic.Xadd64(sysStat, -int64(n))
return
}
- if val := xadduintptr((*uintptr)(unsafe.Pointer(sysStat)), uintptr(-int64(n))); val+n < n {
+ if val := atomic.Xadduintptr((*uintptr)(unsafe.Pointer(sysStat)), uintptr(-int64(n))); val+n < n {
print("runtime: stat underflow: val ", val, ", n ", n, "\n")
exit(2)
}
package runtime
-import "unsafe"
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
// Integrated network poller (platform-independent part).
// A particular implementation (epoll/kqueue) must define the following functions:
//go:linkname net_runtime_pollServerInit net.runtime_pollServerInit
func net_runtime_pollServerInit() {
netpollinit()
- atomicstore(&netpollInited, 1)
+ atomic.Store(&netpollInited, 1)
}
func netpollinited() bool {
- return atomicload(&netpollInited) != 0
+ return atomic.Load(&netpollInited) != 0
}
//go:linkname net_runtime_pollOpen net.runtime_pollOpen
}
func netpollblockcommit(gp *g, gpp unsafe.Pointer) bool {
- return casuintptr((*uintptr)(gpp), pdWait, uintptr(unsafe.Pointer(gp)))
+ return atomic.Casuintptr((*uintptr)(gpp), pdWait, uintptr(unsafe.Pointer(gp)))
}
// returns true if IO is ready, or false if timedout or closed
if old != 0 {
throw("netpollblock: double wait")
}
- if casuintptr(gpp, 0, pdWait) {
+ if atomic.Casuintptr(gpp, 0, pdWait) {
break
}
}
gopark(netpollblockcommit, unsafe.Pointer(gpp), "IO wait", traceEvGoBlockNet, 5)
}
// be careful to not lose concurrent READY notification
- old := xchguintptr(gpp, 0)
+ old := atomic.Xchguintptr(gpp, 0)
if old > pdWait {
throw("netpollblock: corrupted state")
}
if ioready {
new = pdReady
}
- if casuintptr(gpp, old, new) {
+ if atomic.Casuintptr(gpp, old, new) {
if old == pdReady || old == pdWait {
old = 0
}
package runtime
-import "unsafe"
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
const (
_ESRCH = 3
}
for {
- v := atomicload(&_g_.m.waitsemacount)
+ v := atomic.Load(&_g_.m.waitsemacount)
if v > 0 {
- if cas(&_g_.m.waitsemacount, v, v-1) {
+ if atomic.Cas(&_g_.m.waitsemacount, v, v-1) {
return 0 // semaphore acquired
}
continue
//go:nosplit
func semawakeup(mp *m) {
- xadd(&mp.waitsemacount, 1)
+ atomic.Xadd(&mp.waitsemacount, 1)
// From NetBSD's _lwp_unpark(2) manual:
// "If the target LWP is not currently waiting, it will return
// immediately upon the next call to _lwp_park()."
package runtime
-import "unsafe"
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
const (
_ESRCH = 3
}
for {
- v := atomicload(&_g_.m.waitsemacount)
+ v := atomic.Load(&_g_.m.waitsemacount)
if v > 0 {
- if cas(&_g_.m.waitsemacount, v, v-1) {
+ if atomic.Cas(&_g_.m.waitsemacount, v, v-1) {
return 0 // semaphore acquired
}
continue
//go:nosplit
func semawakeup(mp *m) {
- xadd(&mp.waitsemacount, 1)
+ atomic.Xadd(&mp.waitsemacount, 1)
ret := thrwakeup(uintptr(unsafe.Pointer(&mp.waitsemacount)), 1)
if ret != 0 && ret != _ESRCH {
// semawakeup can be called on signal stack.
package runtime
-import "unsafe"
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
// Called to initialize a new m (including the bootstrap m).
// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
n := copy(buf[:], goexits)
n = copy(buf[n:], gostringnocopy(status))
pid := getpid()
- for mp := (*m)(atomicloadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink {
+ for mp := (*m)(atomic.Loadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink {
if mp.procid != pid {
postnote(mp.procid, buf[:])
}
package runtime
import (
+ "runtime/internal/atomic"
"unsafe"
)
func minit() {
var thandle uintptr
stdcall7(_DuplicateHandle, currentProcess, currentThread, currentProcess, uintptr(unsafe.Pointer(&thandle)), 0, 0, _DUPLICATE_SAME_ACCESS)
- atomicstoreuintptr(&getg().m.thread, thandle)
+ atomic.Storeuintptr(&getg().m.thread, thandle)
}
// Called from dropm to undo the effect of an minit.
for {
stdcall2(_WaitForSingleObject, profiletimer, _INFINITE)
- first := (*m)(atomicloadp(unsafe.Pointer(&allm)))
+ first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
for mp := first; mp != nil; mp = mp.alllink {
- thread := atomicloaduintptr(&mp.thread)
+ thread := atomic.Loaduintptr(&mp.thread)
// Do not profile threads blocked on Notes,
// this includes idle worker threads,
// idle timer thread, idle heap scavenger, etc.
lock(&cpuprofilerlock)
if profiletimer == 0 {
timer := stdcall3(_CreateWaitableTimerA, 0, 0, 0)
- atomicstoreuintptr(&profiletimer, timer)
+ atomic.Storeuintptr(&profiletimer, timer)
thread := stdcall6(_CreateThread, 0, 0, funcPC(profileloop), 0, 0, 0)
stdcall2(_SetThreadPriority, thread, _THREAD_PRIORITY_HIGHEST)
stdcall1(_CloseHandle, thread)
due = int64(ms) * -10000
}
stdcall6(_SetWaitableTimer, profiletimer, uintptr(unsafe.Pointer(&due)), uintptr(ms), 0, 0, 0)
- atomicstore((*uint32)(unsafe.Pointer(&getg().m.profilehz)), uint32(hz))
+ atomic.Store((*uint32)(unsafe.Pointer(&getg().m.profilehz)), uint32(hz))
}
func memlimit() uintptr {
package runtime
-import "unsafe"
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
var indexError = error(errorString("index out of range"))
case 0:
_g_.m.dying = 1
_g_.writebuf = nil
- xadd(&panicking, 1)
+ atomic.Xadd(&panicking, 1)
lock(&paniclk)
if debug.schedtrace > 0 || debug.scheddetail > 0 {
schedtrace(true)
}
unlock(&paniclk)
- if xadd(&panicking, -1) != 0 {
+ if atomic.Xadd(&panicking, -1) != 0 {
// Some other m is panicking too.
// Let it print what it needs to print.
// Wait forever without chewing up cpu.
package runtime
+import "runtime/internal/atomic"
+
// A parfor holds state for the parallel for operation.
type parfor struct {
body func(*parfor, uint32) // executed for each element
func parfordo(desc *parfor) {
// Obtain 0-based thread index.
- tid := xadd(&desc.thrseq, 1) - 1
+ tid := atomic.Xadd(&desc.thrseq, 1) - 1
if tid >= desc.nthr {
print("tid=", tid, " nthr=", desc.nthr, "\n")
throw("parfor: invalid tid")
for {
// While there is local work,
// bump low index and execute the iteration.
- pos := xadd64(mypos, 1)
+ pos := atomic.Xadd64(mypos, 1)
begin := uint32(pos) - 1
end := uint32(pos >> 32)
if begin < end {
// increment the done counter...
if try > desc.nthr*4 && !idle {
idle = true
- xadd(&desc.done, 1)
+ atomic.Xadd(&desc.done, 1)
}
// ...if all threads have incremented the counter,
}
if desc.done+extra == desc.nthr {
if !idle {
- xadd(&desc.done, 1)
+ atomic.Xadd(&desc.done, 1)
}
goto exit
}
victimpos := &desc.thr[victim].pos
for {
// See if it has any work.
- pos := atomicload64(victimpos)
+ pos := atomic.Load64(victimpos)
begin = uint32(pos)
end = uint32(pos >> 32)
if begin+1 >= end {
break
}
if idle {
- xadd(&desc.done, -1)
+ atomic.Xadd(&desc.done, -1)
idle = false
}
begin2 := begin + (end-begin)/2
newpos := uint64(begin) | uint64(begin2)<<32
- if cas64(victimpos, pos, newpos) {
+ if atomic.Cas64(victimpos, pos, newpos) {
begin = begin2
break
}
if idle {
throw("parfor: should not be idle")
}
- atomicstore64(mypos, uint64(begin)|uint64(end)<<32)
+ atomic.Store64(mypos, uint64(begin)|uint64(end)<<32)
me.nsteal++
me.nstealcnt += uint64(end) - uint64(begin)
break
// If a caller asked not to wait for the others, exit now
// (assume that most work is already done at this point).
if !idle {
- xadd(&desc.done, 1)
+ atomic.Xadd(&desc.done, 1)
}
goto exit
} else if try < 6*desc.nthr {
}
exit:
- xadd64(&desc.nsteal, int64(me.nsteal))
- xadd64(&desc.nstealcnt, int64(me.nstealcnt))
- xadd64(&desc.nprocyield, int64(me.nprocyield))
- xadd64(&desc.nosyield, int64(me.nosyield))
- xadd64(&desc.nsleep, int64(me.nsleep))
+ atomic.Xadd64(&desc.nsteal, int64(me.nsteal))
+ atomic.Xadd64(&desc.nstealcnt, int64(me.nstealcnt))
+ atomic.Xadd64(&desc.nprocyield, int64(me.nprocyield))
+ atomic.Xadd64(&desc.nosyield, int64(me.nosyield))
+ atomic.Xadd64(&desc.nsleep, int64(me.nsleep))
me.nsteal = 0
me.nstealcnt = 0
me.nprocyield = 0
package runtime
-import "unsafe"
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
// Goroutine scheduler
// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
if forcegc.idle != 0 {
throw("forcegc: phase error")
}
- atomicstore(&forcegc.idle, 1)
+ atomic.Store(&forcegc.idle, 1)
goparkunlock(&forcegc.lock, "force gc (idle)", traceEvGoBlock, 1)
// this goroutine is explicitly resumed by sysmon
if debug.gctrace > 0 {
// status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
casgstatus(gp, _Gwaiting, _Grunnable)
runqput(_g_.m.p.ptr(), gp, true)
- if atomicload(&sched.npidle) != 0 && atomicload(&sched.nmspinning) == 0 { // TODO: fast atomic
+ if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 { // TODO: fast atomic
wakep()
}
_g_.m.locks--
- if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
+ if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
_g_.stackguard0 = stackPreempt
}
}
for i := 0; i < 5; i++ {
// this should tell the scheduler to not start any new goroutines
sched.stopwait = freezeStopWait
- atomicstore(&sched.gcwaiting, 1)
+ atomic.Store(&sched.gcwaiting, 1)
// this should stop running goroutines
if !preemptall() {
break // no running goroutines
// castogscanstatus, casfrom_Gscanstatus.
//go:nosplit
func readgstatus(gp *g) uint32 {
- return atomicload(&gp.atomicstatus)
+ return atomic.Load(&gp.atomicstatus)
}
// Ownership of gscanvalid:
_Gscanrunning,
_Gscansyscall:
if newval == oldval&^_Gscan {
- success = cas(&gp.atomicstatus, oldval, newval)
+ success = atomic.Cas(&gp.atomicstatus, oldval, newval)
}
case _Gscanenqueue:
if newval == _Gwaiting {
- success = cas(&gp.atomicstatus, oldval, newval)
+ success = atomic.Cas(&gp.atomicstatus, oldval, newval)
}
}
if !success {
_Gwaiting,
_Gsyscall:
if newval == oldval|_Gscan {
- return cas(&gp.atomicstatus, oldval, newval)
+ return atomic.Cas(&gp.atomicstatus, oldval, newval)
}
case _Grunning:
if newval == _Gscanrunning || newval == _Gscanenqueue {
- return cas(&gp.atomicstatus, oldval, newval)
+ return atomic.Cas(&gp.atomicstatus, oldval, newval)
}
}
print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
// loop if gp->atomicstatus is in a scan state giving
// GC time to finish and change the state to oldval.
- for !cas(&gp.atomicstatus, oldval, newval) {
+ for !atomic.Cas(&gp.atomicstatus, oldval, newval) {
if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
systemstack(func() {
throw("casgstatus: waiting for Gwaiting but is Grunnable")
if oldstatus != _Gwaiting && oldstatus != _Grunnable {
throw("copystack: bad status, not Gwaiting or Grunnable")
}
- if cas(&gp.atomicstatus, oldstatus, _Gcopystack) {
+ if atomic.Cas(&gp.atomicstatus, oldstatus, _Gcopystack) {
return oldstatus
}
}
if !gp.gcscandone {
// Coordinate with traceback
// in sigprof.
- for !cas(&gp.stackLock, 0, 1) {
+ for !atomic.Cas(&gp.stackLock, 0, 1) {
osyield()
}
scanstack(gp)
- atomicstore(&gp.stackLock, 0)
+ atomic.Store(&gp.stackLock, 0)
gp.gcscandone = true
}
restartg(gp)
lock(&sched.lock)
sched.stopwait = gomaxprocs
- atomicstore(&sched.gcwaiting, 1)
+ atomic.Store(&sched.gcwaiting, 1)
preemptall()
// stop current P
_g_.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.
for i := 0; i < int(gomaxprocs); i++ {
p := allp[i]
s := p.status
- if s == _Psyscall && cas(&p.status, s, _Pgcstop) {
+ if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) {
if trace.enabled {
traceGoSysBlock(p)
traceProcStop(p)
// Wakeup an additional proc in case we have excessive runnable goroutines
// in local queues or in the global queue. If we don't, the proc will park itself.
// If we have lots of excessive work, resetspinning will unpark additional procs as necessary.
- if atomicload(&sched.npidle) != 0 && atomicload(&sched.nmspinning) == 0 {
+ if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
wakep()
}
// Ask all Ps to run the safe point function.
for _, p := range allp[:gomaxprocs] {
if p != _p_ {
- atomicstore(&p.runSafePointFn, 1)
+ atomic.Store(&p.runSafePointFn, 1)
}
}
preemptall()
// Run safe point function for all idle Ps. sched.pidle will
// not change because we hold sched.lock.
for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
- if cas(&p.runSafePointFn, 1, 0) {
+ if atomic.Cas(&p.runSafePointFn, 1, 0) {
fn(p)
sched.safePointWait--
}
for i := 0; i < int(gomaxprocs); i++ {
p := allp[i]
s := p.status
- if s == _Psyscall && p.runSafePointFn == 1 && cas(&p.status, s, _Pidle) {
+ if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) {
if trace.enabled {
traceGoSysBlock(p)
traceProcStop(p)
// Resolve the race between forEachP running the safe-point
// function on this P's behalf and this P running the
// safe-point function directly.
- if !cas(&p.runSafePointFn, 1, 0) {
+ if !atomic.Cas(&p.runSafePointFn, 1, 0) {
return
}
sched.safePointFn(p)
mp.locked = _LockInternal
mp.lockedg = gp
gp.lockedm = mp
- gp.goid = int64(xadd64(&sched.goidgen, 1))
+ gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1))
if raceenabled {
gp.racectx = racegostart(funcPC(newextram))
}
const locked = 1
for {
- old := atomicloaduintptr(&extram)
+ old := atomic.Loaduintptr(&extram)
if old == locked {
yield := osyield
yield()
usleep(1)
continue
}
- if casuintptr(&extram, old, locked) {
+ if atomic.Casuintptr(&extram, old, locked) {
return (*m)(unsafe.Pointer(old))
}
yield := osyield
//go:nosplit
func unlockextra(mp *m) {
- atomicstoreuintptr(&extram, uintptr(unsafe.Pointer(mp)))
+ atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp)))
}
// Create a new m. It will start off with a call to fn, or else the scheduler.
}
if _g_.m.spinning {
_g_.m.spinning = false
- xadd(&sched.nmspinning, -1)
+ atomic.Xadd(&sched.nmspinning, -1)
}
retry:
// Something (presumably the GC) was readied while the
// runtime was starting up this M, so the M is no
// longer spinning.
- if int32(xadd(&sched.nmspinning, -1)) < 0 {
+ if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
throw("mspinning: nmspinning underflowed")
}
} else {
if _p_ == nil {
unlock(&sched.lock)
if spinning {
- xadd(&sched.nmspinning, -1)
+ atomic.Xadd(&sched.nmspinning, -1)
}
return
}
}
// no local work, check that there are no spinning/idle M's,
// otherwise our help is not required
- if atomicload(&sched.nmspinning)+atomicload(&sched.npidle) == 0 && cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic
+ if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic
startm(_p_, true)
return
}
unlock(&sched.lock)
return
}
- if _p_.runSafePointFn != 0 && cas(&_p_.runSafePointFn, 1, 0) {
+ if _p_.runSafePointFn != 0 && atomic.Cas(&_p_.runSafePointFn, 1, 0) {
sched.safePointFn(_p_)
sched.safePointWait--
if sched.safePointWait == 0 {
}
// If this is the last running P and nobody is polling network,
// need to wakeup another M to poll network.
- if sched.npidle == uint32(gomaxprocs-1) && atomicload64(&sched.lastpoll) != 0 {
+ if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 {
unlock(&sched.lock)
startm(_p_, false)
return
// Called when a G is made runnable (newproc, ready).
func wakep() {
// be conservative about spinning threads
- if !cas(&sched.nmspinning, 0, 1) {
+ if !atomic.Cas(&sched.nmspinning, 0, 1) {
return
}
startm(nil, true)
}
if _g_.m.spinning {
_g_.m.spinning = false
- xadd(&sched.nmspinning, -1)
+ atomic.Xadd(&sched.nmspinning, -1)
}
_p_ := releasep()
lock(&sched.lock)
// If number of spinning M's >= number of busy P's, block.
// This is necessary to prevent excessive CPU consumption
// when GOMAXPROCS>>1 but the program parallelism is low.
- if !_g_.m.spinning && 2*atomicload(&sched.nmspinning) >= uint32(gomaxprocs)-atomicload(&sched.npidle) { // TODO: fast atomic
+ if !_g_.m.spinning && 2*atomic.Load(&sched.nmspinning) >= uint32(gomaxprocs)-atomic.Load(&sched.npidle) { // TODO: fast atomic
goto stop
}
if !_g_.m.spinning {
_g_.m.spinning = true
- xadd(&sched.nmspinning, 1)
+ atomic.Xadd(&sched.nmspinning, 1)
}
// random steal from other P's
for i := 0; i < int(4*gomaxprocs); i++ {
unlock(&sched.lock)
if _g_.m.spinning {
_g_.m.spinning = false
- xadd(&sched.nmspinning, -1)
+ atomic.Xadd(&sched.nmspinning, -1)
}
// check all runqueues once again
}
// poll network
- if netpollinited() && xchg64(&sched.lastpoll, 0) != 0 {
+ if netpollinited() && atomic.Xchg64(&sched.lastpoll, 0) != 0 {
if _g_.m.p != 0 {
throw("findrunnable: netpoll with p")
}
throw("findrunnable: netpoll with spinning")
}
gp := netpoll(true) // block until new work is available
- atomicstore64(&sched.lastpoll, uint64(nanotime()))
+ atomic.Store64(&sched.lastpoll, uint64(nanotime()))
if gp != nil {
lock(&sched.lock)
_p_ = pidleget()
var nmspinning uint32
if _g_.m.spinning {
_g_.m.spinning = false
- nmspinning = xadd(&sched.nmspinning, -1)
+ nmspinning = atomic.Xadd(&sched.nmspinning, -1)
if int32(nmspinning) < 0 {
throw("findrunnable: negative nmspinning")
}
} else {
- nmspinning = atomicload(&sched.nmspinning)
+ nmspinning = atomic.Load(&sched.nmspinning)
}
// M wakeup policy is deliberately somewhat conservative (see nmspinning handling),
// so see if we need to wakeup another P here.
- if nmspinning == 0 && atomicload(&sched.npidle) > 0 {
+ if nmspinning == 0 && atomic.Load(&sched.npidle) > 0 {
wakep()
}
}
save(pc, sp)
}
- if atomicload(&sched.sysmonwait) != 0 { // TODO: fast atomic
+ if atomic.Load(&sched.sysmonwait) != 0 { // TODO: fast atomic
systemstack(entersyscall_sysmon)
save(pc, sp)
}
_g_.sysblocktraced = true
_g_.m.mcache = nil
_g_.m.p.ptr().m = 0
- atomicstore(&_g_.m.p.ptr().status, _Psyscall)
+ atomic.Store(&_g_.m.p.ptr().status, _Psyscall)
if sched.gcwaiting != 0 {
systemstack(entersyscall_gcwait)
save(pc, sp)
func entersyscall_sysmon() {
lock(&sched.lock)
- if atomicload(&sched.sysmonwait) != 0 {
- atomicstore(&sched.sysmonwait, 0)
+ if atomic.Load(&sched.sysmonwait) != 0 {
+ atomic.Store(&sched.sysmonwait, 0)
notewakeup(&sched.sysmonnote)
}
unlock(&sched.lock)
_p_ := _g_.m.p.ptr()
lock(&sched.lock)
- if sched.stopwait > 0 && cas(&_p_.status, _Psyscall, _Pgcstop) {
+ if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) {
if trace.enabled {
traceGoSysBlock(_p_)
traceProcStop(_p_)
}
// Try to re-acquire the last P.
- if _g_.m.p != 0 && _g_.m.p.ptr().status == _Psyscall && cas(&_g_.m.p.ptr().status, _Psyscall, _Prunning) {
+ if _g_.m.p != 0 && _g_.m.p.ptr().status == _Psyscall && atomic.Cas(&_g_.m.p.ptr().status, _Psyscall, _Prunning) {
// There's a cpu for us, so we can run.
_g_.m.mcache = _g_.m.p.ptr().mcache
_g_.m.p.ptr().m.set(_g_.m)
func exitsyscallfast_pidle() bool {
lock(&sched.lock)
_p_ := pidleget()
- if _p_ != nil && atomicload(&sched.sysmonwait) != 0 {
- atomicstore(&sched.sysmonwait, 0)
+ if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 {
+ atomic.Store(&sched.sysmonwait, 0)
notewakeup(&sched.sysmonnote)
}
unlock(&sched.lock)
_p_ := pidleget()
if _p_ == nil {
globrunqput(gp)
- } else if atomicload(&sched.sysmonwait) != 0 {
- atomicstore(&sched.sysmonwait, 0)
+ } else if atomic.Load(&sched.sysmonwait) != 0 {
+ atomic.Store(&sched.sysmonwait, 0)
notewakeup(&sched.sysmonnote)
}
unlock(&sched.lock)
// Sched.goidgen is the last allocated id,
// this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
// At startup sched.goidgen=0, so main goroutine receives goid=1.
- _p_.goidcache = xadd64(&sched.goidgen, _GoidCacheBatch)
+ _p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch)
_p_.goidcache -= _GoidCacheBatch - 1
_p_.goidcacheend = _p_.goidcache + _GoidCacheBatch
}
}
runqput(_p_, newg, true)
- if atomicload(&sched.npidle) != 0 && atomicload(&sched.nmspinning) == 0 && unsafe.Pointer(fn.fn) != unsafe.Pointer(funcPC(main)) { // TODO: fast atomic
+ if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 && unsafe.Pointer(fn.fn) != unsafe.Pointer(funcPC(main)) { // TODO: fast atomic
wakep()
}
_g_.m.locks--
mp.mallocing++
// Coordinate with stack barrier insertion in scanstack.
- for !cas(&gp.stackLock, 0, 1) {
+ for !atomic.Cas(&gp.stackLock, 0, 1) {
osyield()
}
}
}
}
- atomicstore(&gp.stackLock, 0)
+ atomic.Store(&gp.stackLock, 0)
if prof.hz != 0 {
// Simple cas-lock to coordinate with setcpuprofilerate.
- for !cas(&prof.lock, 0, 1) {
+ for !atomic.Cas(&prof.lock, 0, 1) {
osyield()
}
if prof.hz != 0 {
cpuprof.add(stk[:n])
}
- atomicstore(&prof.lock, 0)
+ atomic.Store(&prof.lock, 0)
}
mp.mallocing--
}
// it would deadlock.
resetcpuprofiler(0)
- for !cas(&prof.lock, 0, 1) {
+ for !atomic.Cas(&prof.lock, 0, 1) {
osyield()
}
prof.hz = hz
- atomicstore(&prof.lock, 0)
+ atomic.Store(&prof.lock, 0)
lock(&sched.lock)
sched.profilehz = hz
}
}
var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32
- atomicstore((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
+ atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
return runnablePs
}
delay = 10 * 1000
}
usleep(delay)
- if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomicload(&sched.npidle) == uint32(gomaxprocs)) { // TODO: fast atomic
+ if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) { // TODO: fast atomic
lock(&sched.lock)
- if atomicload(&sched.gcwaiting) != 0 || atomicload(&sched.npidle) == uint32(gomaxprocs) {
- atomicstore(&sched.sysmonwait, 1)
+ if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) {
+ atomic.Store(&sched.sysmonwait, 1)
unlock(&sched.lock)
// Make wake-up period small enough
// for the sampling to be correct.
}
notetsleep(&sched.sysmonnote, maxsleep)
lock(&sched.lock)
- atomicstore(&sched.sysmonwait, 0)
+ atomic.Store(&sched.sysmonwait, 0)
noteclear(&sched.sysmonnote)
idle = 0
delay = 20
unlock(&sched.lock)
}
// poll network if not polled for more than 10ms
- lastpoll := int64(atomicload64(&sched.lastpoll))
+ lastpoll := int64(atomic.Load64(&sched.lastpoll))
now := nanotime()
unixnow := unixnanotime()
if lastpoll != 0 && lastpoll+10*1000*1000 < now {
- cas64(&sched.lastpoll, uint64(lastpoll), uint64(now))
+ atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now))
gp := netpoll(false) // non-blocking - returns list of goroutines
if gp != nil {
// Need to decrement number of idle locked M's
idle++
}
// check if we need to force a GC
- lastgc := int64(atomicload64(&memstats.last_gc))
- if lastgc != 0 && unixnow-lastgc > forcegcperiod && atomicload(&forcegc.idle) != 0 {
+ lastgc := int64(atomic.Load64(&memstats.last_gc))
+ if lastgc != 0 && unixnow-lastgc > forcegcperiod && atomic.Load(&forcegc.idle) != 0 {
lock(&forcegc.lock)
forcegc.idle = 0
forcegc.g.schedlink = 0
// On the one hand we don't want to retake Ps if there is no other work to do,
// but on the other hand we want to retake them eventually
// because they can prevent the sysmon thread from deep sleep.
- if runqempty(_p_) && atomicload(&sched.nmspinning)+atomicload(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now {
+ if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now {
continue
}
// Need to decrement number of idle locked M's
// Otherwise the M from which we retake can exit the syscall,
// increment nmidle and report deadlock.
incidlelocked(-1)
- if cas(&_p_.status, s, _Pidle) {
+ if atomic.Cas(&_p_.status, s, _Pidle) {
if trace.enabled {
traceGoSysBlock(_p_)
traceProcStop(_p_)
continue
}
mp := _p_.m.ptr()
- h := atomicload(&_p_.runqhead)
- t := atomicload(&_p_.runqtail)
+ h := atomic.Load(&_p_.runqhead)
+ t := atomic.Load(&_p_.runqtail)
if detailed {
id := int32(-1)
if mp != nil {
}
_p_.link = sched.pidle
sched.pidle.set(_p_)
- xadd(&sched.npidle, 1) // TODO: fast atomic
+ atomic.Xadd(&sched.npidle, 1) // TODO: fast atomic
}
// Try get a p from _Pidle list.
_p_ := sched.pidle.ptr()
if _p_ != nil {
sched.pidle = _p_.link
- xadd(&sched.npidle, -1) // TODO: fast atomic
+ atomic.Xadd(&sched.npidle, -1) // TODO: fast atomic
}
return _p_
}
}
retry:
- h := atomicload(&_p_.runqhead) // load-acquire, synchronize with consumers
+ h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers
t := _p_.runqtail
if t-h < uint32(len(_p_.runq)) {
_p_.runq[t%uint32(len(_p_.runq))].set(gp)
- atomicstore(&_p_.runqtail, t+1) // store-release, makes the item available for consumption
+ atomic.Store(&_p_.runqtail, t+1) // store-release, makes the item available for consumption
return
}
if runqputslow(_p_, gp, h, t) {
for i := uint32(0); i < n; i++ {
batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr()
}
- if !cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume
+ if !atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume
return false
}
batch[n] = gp
}
for {
- h := atomicload(&_p_.runqhead) // load-acquire, synchronize with other consumers
+ h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers
t := _p_.runqtail
if t == h {
return nil, false
}
gp := _p_.runq[h%uint32(len(_p_.runq))].ptr()
- if cas(&_p_.runqhead, h, h+1) { // cas-release, commits consume
+ if atomic.Cas(&_p_.runqhead, h, h+1) { // cas-release, commits consume
return gp, false
}
}
// Can be executed by any P.
func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
for {
- h := atomicload(&_p_.runqhead) // load-acquire, synchronize with other consumers
- t := atomicload(&_p_.runqtail) // load-acquire, synchronize with the producer
+ h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers
+ t := atomic.Load(&_p_.runqtail) // load-acquire, synchronize with the producer
n := t - h
n = n - n/2
if n == 0 {
g := _p_.runq[(h+i)%uint32(len(_p_.runq))]
batch[(batchHead+i)%uint32(len(batch))] = g
}
- if cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume
+ if atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume
return n
}
}
if n == 0 {
return gp
}
- h := atomicload(&_p_.runqhead) // load-acquire, synchronize with consumers
+ h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers
if t-h+n >= uint32(len(_p_.runq)) {
throw("runqsteal: runq overflow")
}
- atomicstore(&_p_.runqtail, t+n) // store-release, makes the item available for consumption
+ atomic.Store(&_p_.runqtail, t+n) // store-release, makes the item available for consumption
return gp
}
package runtime
-import _ "unsafe" // for go:linkname
+import (
+ "runtime/internal/atomic"
+ _ "unsafe" // for go:linkname
+)
//go:generate go run wincallback.go
//go:generate go run mkduff.go
// Note: Called by runtime/pprof in addition to runtime code.
func tickspersecond() int64 {
- r := int64(atomicload64(&ticks.val))
+ r := int64(atomic.Load64(&ticks.val))
if r != 0 {
return r
}
if r == 0 {
r++
}
- atomicstore64(&ticks.val, uint64(r))
+ atomic.Store64(&ticks.val, uint64(r))
}
unlock(&ticks.lock)
return r
package runtime
-import "unsafe"
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
prefetcht1(uintptr(unsafe.Pointer(&test_z64)))
prefetcht2(uintptr(unsafe.Pointer(&test_z64)))
prefetchnta(uintptr(unsafe.Pointer(&test_z64)))
- if cas64(&test_z64, test_x64, 1) {
+ if atomic.Cas64(&test_z64, test_x64, 1) {
throw("cas64 failed")
}
if test_x64 != 0 {
throw("cas64 failed")
}
test_x64 = 42
- if !cas64(&test_z64, test_x64, 1) {
+ if !atomic.Cas64(&test_z64, test_x64, 1) {
throw("cas64 failed")
}
if test_x64 != 42 || test_z64 != 1 {
throw("cas64 failed")
}
- if atomicload64(&test_z64) != 1 {
+ if atomic.Load64(&test_z64) != 1 {
throw("load64 failed")
}
- atomicstore64(&test_z64, (1<<40)+1)
- if atomicload64(&test_z64) != (1<<40)+1 {
+ atomic.Store64(&test_z64, (1<<40)+1)
+ if atomic.Load64(&test_z64) != (1<<40)+1 {
throw("store64 failed")
}
- if xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
+ if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
throw("xadd64 failed")
}
- if atomicload64(&test_z64) != (2<<40)+2 {
+ if atomic.Load64(&test_z64) != (2<<40)+2 {
throw("xadd64 failed")
}
- if xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
+ if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
throw("xchg64 failed")
}
- if atomicload64(&test_z64) != (3<<40)+3 {
+ if atomic.Load64(&test_z64) != (3<<40)+3 {
throw("xchg64 failed")
}
}
var z uint32
z = 1
- if !cas(&z, 1, 2) {
+ if !atomic.Cas(&z, 1, 2) {
throw("cas1")
}
if z != 2 {
}
z = 4
- if cas(&z, 5, 6) {
+ if atomic.Cas(&z, 5, 6) {
throw("cas3")
}
if z != 4 {
}
z = 0xffffffff
- if !cas(&z, 0xffffffff, 0xfffffffe) {
+ if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
throw("cas5")
}
if z != 0xfffffffe {
}
m = [4]byte{1, 1, 1, 1}
- atomicor8(&m[1], 0xf0)
+ atomic.Or8(&m[1], 0xf0)
if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
throw("atomicor8")
}
package runtime
-import "unsafe"
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
/*
* defined constants
func (gp guintptr) ptr() *g { return (*g)(unsafe.Pointer(gp)) }
func (gp *guintptr) set(g *g) { *gp = guintptr(unsafe.Pointer(g)) }
func (gp *guintptr) cas(old, new guintptr) bool {
- return casuintptr((*uintptr)(unsafe.Pointer(gp)), uintptr(old), uintptr(new))
+ return atomic.Casuintptr((*uintptr)(unsafe.Pointer(gp)), uintptr(old), uintptr(new))
}
type puintptr uintptr
package runtime
-import "unsafe"
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
// Asynchronous semaphore for sync.Mutex.
for {
lock(&root.lock)
// Add ourselves to nwait to disable "easy case" in semrelease.
- xadd(&root.nwait, 1)
+ atomic.Xadd(&root.nwait, 1)
// Check cansemacquire to avoid missed wakeup.
if cansemacquire(addr) {
- xadd(&root.nwait, -1)
+ atomic.Xadd(&root.nwait, -1)
unlock(&root.lock)
break
}
func semrelease(addr *uint32) {
root := semroot(addr)
- xadd(addr, 1)
+ atomic.Xadd(addr, 1)
// Easy case: no waiters?
// This check must happen after the xadd, to avoid a missed wakeup
// (see loop in semacquire).
- if atomicload(&root.nwait) == 0 {
+ if atomic.Load(&root.nwait) == 0 {
return
}
// Harder case: search for a waiter and wake it.
lock(&root.lock)
- if atomicload(&root.nwait) == 0 {
+ if atomic.Load(&root.nwait) == 0 {
// The count is already consumed by another goroutine,
// so no need to wake up another goroutine.
unlock(&root.lock)
s := root.head
for ; s != nil; s = s.next {
if s.elem == unsafe.Pointer(addr) {
- xadd(&root.nwait, -1)
+ atomic.Xadd(&root.nwait, -1)
root.dequeue(s)
break
}
func cansemacquire(addr *uint32) bool {
for {
- v := atomicload(addr)
+ v := atomic.Load(addr)
if v == 0 {
return false
}
- if cas(addr, v, v-1) {
+ if atomic.Cas(addr, v, v-1) {
return true
}
}
package runtime
-import "unsafe"
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
var sig struct {
note note
if mask&bit != 0 {
return true // signal already in queue
}
- if cas(&sig.mask[s/32], mask, mask|bit) {
+ if atomic.Cas(&sig.mask[s/32], mask, mask|bit) {
break
}
}
// Notify receiver that queue has new bit.
Send:
for {
- switch atomicload(&sig.state) {
+ switch atomic.Load(&sig.state) {
default:
throw("sigsend: inconsistent state")
case sigIdle:
- if cas(&sig.state, sigIdle, sigSending) {
+ if atomic.Cas(&sig.state, sigIdle, sigSending) {
break Send
}
case sigSending:
// notification already pending
break Send
case sigReceiving:
- if cas(&sig.state, sigReceiving, sigIdle) {
+ if atomic.Cas(&sig.state, sigReceiving, sigIdle) {
notewakeup(&sig.note)
break Send
}
// Wait for updates to be available from signal sender.
Receive:
for {
- switch atomicload(&sig.state) {
+ switch atomic.Load(&sig.state) {
default:
throw("signal_recv: inconsistent state")
case sigIdle:
- if cas(&sig.state, sigIdle, sigReceiving) {
+ if atomic.Cas(&sig.state, sigIdle, sigReceiving) {
notetsleepg(&sig.note, -1)
noteclear(&sig.note)
break Receive
}
case sigSending:
- if cas(&sig.state, sigSending, sigIdle) {
+ if atomic.Cas(&sig.state, sigSending, sigIdle) {
break Receive
}
}
// Incorporate updates from sender into local copy.
for i := range sig.mask {
- sig.recv[i] = xchg(&sig.mask[i], 0)
+ sig.recv[i] = atomic.Xchg(&sig.mask[i], 0)
}
}
}
package runtime
-import "unsafe"
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
/*
Stack layout parameters.
// NOTE: stackguard0 may change underfoot, if another thread
// is about to try to preempt gp. Read it just once and use that same
// value now and below.
- preempt := atomicloaduintptr(&gp.stackguard0) == stackPreempt
+ preempt := atomic.Loaduintptr(&gp.stackguard0) == stackPreempt
// Be conservative about where we preempt.
// We are interested in preempting user Go code, not runtime code.
package runtime
import (
+ "runtime/internal/atomic"
"unsafe"
)
for {
ms := maxstring
- if uintptr(size) <= uintptr(ms) || casuintptr((*uintptr)(unsafe.Pointer(&maxstring)), uintptr(ms), uintptr(size)) {
+ if uintptr(size) <= uintptr(ms) || atomic.Casuintptr((*uintptr)(unsafe.Pointer(&maxstring)), uintptr(ms), uintptr(size)) {
return
}
}
s := *(*string)(unsafe.Pointer(&ss))
for {
ms := maxstring
- if uintptr(len(s)) <= ms || casuintptr(&maxstring, ms, uintptr(len(s))) {
+ if uintptr(len(s)) <= ms || atomic.Casuintptr(&maxstring, ms, uintptr(len(s))) {
break
}
}
// See the assembly implementations for more details.
func cgocallback_gofunc(fv uintptr, frame uintptr, framesize uintptr)
-//go:noescape
-func cas(ptr *uint32, old, new uint32) bool
-
-// NO go:noescape annotation; see atomic_pointer.go.
-func casp1(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool
-
-func nop() // call to prevent inlining of function body
-
-//go:noescape
-func casuintptr(ptr *uintptr, old, new uintptr) bool
-
-//go:noescape
-func atomicstoreuintptr(ptr *uintptr, new uintptr)
-
-//go:noescape
-func atomicloaduintptr(ptr *uintptr) uintptr
-
-//go:noescape
-func atomicloaduint(ptr *uint) uint
-
-// TODO: Write native implementations of int64 atomic ops (or improve
-// inliner). These portable ones can't be inlined right now, so we're
-// taking an extra function call hit.
-
-func atomicstoreint64(ptr *int64, new int64) {
- atomicstore64((*uint64)(unsafe.Pointer(ptr)), uint64(new))
-}
-
-func atomicloadint64(ptr *int64) int64 {
- return int64(atomicload64((*uint64)(unsafe.Pointer(ptr))))
-}
-
-func xaddint64(ptr *int64, delta int64) int64 {
- return int64(xadd64((*uint64)(unsafe.Pointer(ptr)), delta))
-}
-
// publicationBarrier performs a store/store barrier (a "publication"
// or "export" barrier). Some form of synchronization is required
// between initializing an object and making that object accessible to
SWI $0x80
RET
-TEXT runtime·cas(SB),NOSPLIT,$0
- B runtime·armcas(SB)
-
-TEXT runtime·casp1(SB),NOSPLIT,$0
- B runtime·cas(SB)
-
TEXT ·publicationBarrier(SB),NOSPLIT,$-4-0
B runtime·armPublicationBarrier(SB)
SWI $0
RET
-TEXT runtime·casp1(SB),NOSPLIT,$0
- B runtime·cas(SB)
-
-// TODO(minux): this is only valid for ARMv6+
-// bool armcas(int32 *val, int32 old, int32 new)
-// Atomically:
-// if(*val == old){
-// *val = new;
-// return 1;
-// }else
-// return 0;
-TEXT runtime·cas(SB),NOSPLIT,$0
- B runtime·armcas(SB)
-
// TODO: this is only valid for ARMv7+
TEXT ·publicationBarrier(SB),NOSPLIT,$-4-0
B runtime·armPublicationBarrier(SB)
SWI $0
RET
-// Use kernel version instead of native armcas in asm_arm.s.
-// See ../sync/atomic/asm_linux_arm.s for details.
-TEXT cas<>(SB),NOSPLIT,$0
- MOVW $0xffff0fc0, R15 // R15 is hardware PC.
-
-TEXT runtime·cas(SB),NOSPLIT,$0
- MOVW ptr+0(FP), R2
- MOVW old+4(FP), R0
-loop:
- MOVW new+8(FP), R1
- BL cas<>(SB)
- BCC check
- MOVW $1, R0
- MOVB R0, ret+12(FP)
- RET
-check:
- // Kernel lies; double-check.
- MOVW ptr+0(FP), R2
- MOVW old+4(FP), R0
- MOVW 0(R2), R3
- CMP R0, R3
- BEQ loop
- MOVW $0, R0
- MOVB R0, ret+12(FP)
- RET
-
-TEXT runtime·casp1(SB),NOSPLIT,$0
- B runtime·cas(SB)
-
// As for cas, memory barriers are complicated on ARM, but the kernel
// provides a user helper. ARMv5 does not support SMP and has no
// memory barrier instruction at all. ARMv6 added SMP support and has
NACL_SYSCALL(SYS_get_random_bytes)
RET
-TEXT runtime·casp1(SB),NOSPLIT,$0
- B runtime·cas(SB)
-
-// This is only valid for ARMv6+, however, NaCl/ARM is only defined
-// for ARMv7A anyway.
-// bool armcas(int32 *val, int32 old, int32 new)
-// AtomiBLy:
-// if(*val == old){
-// *val = new;
-// return 1;
-// }else
-// return 0;
-TEXT runtime·cas(SB),NOSPLIT,$0
- B runtime·armcas(SB)
-
// Likewise, this is only valid for ARMv7+, but that's okay.
TEXT ·publicationBarrier(SB),NOSPLIT,$-4-0
B runtime·armPublicationBarrier(SB)
SWI $0xa0005c // sys_fcntl
RET
-TEXT runtime·casp1(SB),NOSPLIT,$0
- B runtime·cas(SB)
-
-// TODO(minux): this is only valid for ARMv6+
-// bool armcas(int32 *val, int32 old, int32 new)
-// Atomically:
-// if(*val == old){
-// *val = new;
-// return 1;
-// }else
-// return 0;
-TEXT runtime·cas(SB),NOSPLIT,$0
- B runtime·armcas(SB)
-
// TODO: this is only valid for ARMv7+
TEXT ·publicationBarrier(SB),NOSPLIT,$-4-0
B runtime·armPublicationBarrier(SB)
MOVW R0, ret+4(FP)
RET
-TEXT runtime·casp1(SB),NOSPLIT,$0
- //B runtime·armcas(SB)
- B runtime·cas(SB)
-
-TEXT runtime·cas(SB),NOSPLIT,$0
- B runtime·armcas(SB)
-
TEXT ·publicationBarrier(SB),NOSPLIT,$-4-0
B runtime·armPublicationBarrier(SB)
package runtime
-import "unsafe"
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
// Event types in the trace, args are given in square brackets.
const (
// that used to call xadd64 and cputicks are sensitive to that.
//go:nosplit
func tracestamp() (seq uint64, ts int64) {
- seq = atomicload64(&traceseq)
- for seq&1 != 0 || !cas64(&traceseq, seq, seq+1) {
- seq = atomicload64(&traceseq)
+ seq = atomic.Load64(&traceseq)
+ for seq&1 != 0 || !atomic.Cas64(&traceseq, seq, seq+1) {
+ seq = atomic.Load64(&traceseq)
}
ts = cputicks()
- atomicstore64(&traceseq, seq+2)
+ atomic.Store64(&traceseq, seq+2)
return seq >> 1, ts
}
RET
TEXT ·generalCAS64(SB),NOSPLIT,$0-21
- B runtime·cas64(SB)
+ B runtime∕internal∕atomic·Cas64(SB)
GLOBL armCAS64(SB), NOPTR, $4