if os.Getenv("SSATEST") == "" {
return false
}
- case "arm":
- // nacl/arm doesn't work yet
- if obj.Getgoos() == "nacl" && os.Getenv("SSATEST") == "" {
- return false
- }
- case "amd64":
+ case "amd64", "arm":
// Generally available.
}
if !ssaEnabled {
ctxt *obj.Link // Generic arch information
optimize bool // Do optimization
noDuffDevice bool // Don't use Duff's device
+ nacl bool // GOOS=nacl
sparsePhiCutoff uint64 // Sparse phi location algorithm used above this #blocks*#variables score
curFunc *Func
}
c.ctxt = ctxt
c.optimize = optimize
+ c.nacl = obj.Getgoos() == "nacl"
- // Don't use Duff's device on Plan 9, because floating
+ // Don't use Duff's device on Plan 9 AMD64, because floating
// point operations are not allowed in a note handler.
- if obj.Getgoos() == "plan9" {
+ if obj.Getgoos() == "plan9" && arch == "amd64" {
c.noDuffDevice = true
}
+ if c.nacl {
+ c.noDuffDevice = true // Don't use Duff's device on NaCl
+
+ // ARM assembler rewrites DIV/MOD to runtime calls, which
+ // clobber R12 on nacl
+ opcodeTable[OpARMDIV].reg.clobbers |= 1 << 12 // R12
+ opcodeTable[OpARMDIVU].reg.clobbers |= 1 << 12 // R12
+ opcodeTable[OpARMMOD].reg.clobbers |= 1 << 12 // R12
+ opcodeTable[OpARMMODU].reg.clobbers |= 1 << 12 // R12
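+ // (reg.clobbers is a register bitmask, so setting bit 12 tells the
+ // register allocator that these ops unconditionally clobber R12.)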
+ }
+
// Assign IDs to preallocated values/blocks.
for i := range c.values {
c.values[i].ID = ID(i)
// 4 and 128 are magic constants, see runtime/mkduff.go
(Zero [s] ptr mem)
&& SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512
- && SizeAndAlign(s).Align()%4 == 0 ->
+ && SizeAndAlign(s).Align()%4 == 0 && !config.noDuffDevice ->
(DUFFZERO [4 * (128 - int64(SizeAndAlign(s).Size()/4))] ptr (MOVWconst [0]) mem)
// Large zeroing uses a loop
(Zero [s] ptr mem)
- && SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 512
+ && SizeAndAlign(s).Size()%4 == 0 && (SizeAndAlign(s).Size() > 512 || config.noDuffDevice)
&& SizeAndAlign(s).Align()%4 == 0 ->
(LoweredZero ptr (ADDconst <ptr.Type> ptr [SizeAndAlign(s).Size()]) (MOVWconst [0]) mem)
// 8 and 128 are magic constants, see runtime/mkduff.go
(Move [s] dst src mem)
&& SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512
- && SizeAndAlign(s).Align()%4 == 0 ->
+ && SizeAndAlign(s).Align()%4 == 0 && !config.noDuffDevice ->
(DUFFCOPY [8 * (128 - int64(SizeAndAlign(s).Size()/4))] dst src mem)
// Large move uses a loop
(Move [s] dst src mem)
- && SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 512
+ && SizeAndAlign(s).Size()%4 == 0 && (SizeAndAlign(s).Size() > 512 || config.noDuffDevice)
&& SizeAndAlign(s).Align()%4 == 0 ->
(LoweredMove dst src (ADDconst <src.Type> src [SizeAndAlign(s).Size()]) mem)
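// For reference (see runtime/mkduff.go): duffzero's body is 128 four-byte
// MOVW.P stores and duffcopy's body is 128 eight-byte load/store pairs, so an
// n-byte block enters the routine at offset 4*(128 - n/4) or 8*(128 - n/4)
// respectively; e.g. n = 8 gives DUFFZERO offset 4*(128-2) = 504, running only
// the last two stores.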
(CMPshiftRAreg x y (MOVWconst [c])) -> (CMPshiftRA x y [c])
// use indexed loads and stores
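// (The indexed and shifted forms are disabled on nacl below; the NaCl/ARM
// sandbox expects memory accesses through a masked base register with an
// immediate offset, which register and shifted-register offsets don't satisfy.)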
-(MOVWload [0] {sym} (ADD ptr idx) mem) && sym == nil -> (MOVWloadidx ptr idx mem)
-(MOVWstore [0] {sym} (ADD ptr idx) val mem) && sym == nil -> (MOVWstoreidx ptr idx val mem)
-(MOVWload [0] {sym} (ADDshiftLL ptr idx [c]) mem) && sym == nil -> (MOVWloadshiftLL ptr idx [c] mem)
-(MOVWload [0] {sym} (ADDshiftRL ptr idx [c]) mem) && sym == nil -> (MOVWloadshiftRL ptr idx [c] mem)
-(MOVWload [0] {sym} (ADDshiftRA ptr idx [c]) mem) && sym == nil -> (MOVWloadshiftRA ptr idx [c] mem)
-(MOVWstore [0] {sym} (ADDshiftLL ptr idx [c]) val mem) && sym == nil -> (MOVWstoreshiftLL ptr idx [c] val mem)
-(MOVWstore [0] {sym} (ADDshiftRL ptr idx [c]) val mem) && sym == nil -> (MOVWstoreshiftRL ptr idx [c] val mem)
-(MOVWstore [0] {sym} (ADDshiftRA ptr idx [c]) val mem) && sym == nil -> (MOVWstoreshiftRA ptr idx [c] val mem)
+(MOVWload [0] {sym} (ADD ptr idx) mem) && sym == nil && !config.nacl -> (MOVWloadidx ptr idx mem)
+(MOVWstore [0] {sym} (ADD ptr idx) val mem) && sym == nil && !config.nacl -> (MOVWstoreidx ptr idx val mem)
+(MOVWload [0] {sym} (ADDshiftLL ptr idx [c]) mem) && sym == nil && !config.nacl -> (MOVWloadshiftLL ptr idx [c] mem)
+(MOVWload [0] {sym} (ADDshiftRL ptr idx [c]) mem) && sym == nil && !config.nacl -> (MOVWloadshiftRL ptr idx [c] mem)
+(MOVWload [0] {sym} (ADDshiftRA ptr idx [c]) mem) && sym == nil && !config.nacl -> (MOVWloadshiftRA ptr idx [c] mem)
+(MOVWstore [0] {sym} (ADDshiftLL ptr idx [c]) val mem) && sym == nil && !config.nacl -> (MOVWstoreshiftLL ptr idx [c] val mem)
+(MOVWstore [0] {sym} (ADDshiftRL ptr idx [c]) val mem) && sym == nil && !config.nacl -> (MOVWstoreshiftRL ptr idx [c] val mem)
+(MOVWstore [0] {sym} (ADDshiftRA ptr idx [c]) val mem) && sym == nil && !config.nacl -> (MOVWstoreshiftRA ptr idx [c] val mem)
// constant folding in indexed loads and stores
(MOVWloadidx ptr (MOVWconst [c]) mem) -> (MOVWload [c] ptr mem)
s.f.Config.fe.Unimplementedf(0, "arch %s not implemented", s.f.Config.arch)
}
}
+ if s.f.Config.nacl && s.f.Config.arch == "arm" {
+ s.allocatable &^= 1 << 9 // R9 is "thread pointer" on nacl/arm
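+ // (allocatable is a bitmask of registers the allocator may hand out;
+ // clearing bit 9 keeps R9 out of allocation entirely.)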
+ }
s.regs = make([]regState, s.numRegs)
s.values = make([]valState, f.NumValues())
return true
}
// match: (MOVWload [0] {sym} (ADD ptr idx) mem)
- // cond: sym == nil
+ // cond: sym == nil && !config.nacl
// result: (MOVWloadidx ptr idx mem)
for {
if v.AuxInt != 0 {
ptr := v_0.Args[0]
idx := v_0.Args[1]
mem := v.Args[1]
- if !(sym == nil) {
+ if !(sym == nil && !config.nacl) {
break
}
v.reset(OpARMMOVWloadidx)
return true
}
// match: (MOVWload [0] {sym} (ADDshiftLL ptr idx [c]) mem)
- // cond: sym == nil
+ // cond: sym == nil && !config.nacl
// result: (MOVWloadshiftLL ptr idx [c] mem)
for {
if v.AuxInt != 0 {
idx := v_0.Args[1]
c := v_0.AuxInt
mem := v.Args[1]
- if !(sym == nil) {
+ if !(sym == nil && !config.nacl) {
break
}
v.reset(OpARMMOVWloadshiftLL)
return true
}
// match: (MOVWload [0] {sym} (ADDshiftRL ptr idx [c]) mem)
- // cond: sym == nil
+ // cond: sym == nil && !config.nacl
// result: (MOVWloadshiftRL ptr idx [c] mem)
for {
if v.AuxInt != 0 {
idx := v_0.Args[1]
c := v_0.AuxInt
mem := v.Args[1]
- if !(sym == nil) {
+ if !(sym == nil && !config.nacl) {
break
}
v.reset(OpARMMOVWloadshiftRL)
return true
}
// match: (MOVWload [0] {sym} (ADDshiftRA ptr idx [c]) mem)
- // cond: sym == nil
+ // cond: sym == nil && !config.nacl
// result: (MOVWloadshiftRA ptr idx [c] mem)
for {
if v.AuxInt != 0 {
idx := v_0.Args[1]
c := v_0.AuxInt
mem := v.Args[1]
- if !(sym == nil) {
+ if !(sym == nil && !config.nacl) {
break
}
v.reset(OpARMMOVWloadshiftRA)
return true
}
// match: (MOVWstore [0] {sym} (ADD ptr idx) val mem)
- // cond: sym == nil
+ // cond: sym == nil && !config.nacl
// result: (MOVWstoreidx ptr idx val mem)
for {
if v.AuxInt != 0 {
idx := v_0.Args[1]
val := v.Args[1]
mem := v.Args[2]
- if !(sym == nil) {
+ if !(sym == nil && !config.nacl) {
break
}
v.reset(OpARMMOVWstoreidx)
return true
}
// match: (MOVWstore [0] {sym} (ADDshiftLL ptr idx [c]) val mem)
- // cond: sym == nil
+ // cond: sym == nil && !config.nacl
// result: (MOVWstoreshiftLL ptr idx [c] val mem)
for {
if v.AuxInt != 0 {
c := v_0.AuxInt
val := v.Args[1]
mem := v.Args[2]
- if !(sym == nil) {
+ if !(sym == nil && !config.nacl) {
break
}
v.reset(OpARMMOVWstoreshiftLL)
return true
}
// match: (MOVWstore [0] {sym} (ADDshiftRL ptr idx [c]) val mem)
- // cond: sym == nil
+ // cond: sym == nil && !config.nacl
// result: (MOVWstoreshiftRL ptr idx [c] val mem)
for {
if v.AuxInt != 0 {
c := v_0.AuxInt
val := v.Args[1]
mem := v.Args[2]
- if !(sym == nil) {
+ if !(sym == nil && !config.nacl) {
break
}
v.reset(OpARMMOVWstoreshiftRL)
return true
}
// match: (MOVWstore [0] {sym} (ADDshiftRA ptr idx [c]) val mem)
- // cond: sym == nil
+ // cond: sym == nil && !config.nacl
// result: (MOVWstoreshiftRA ptr idx [c] val mem)
for {
if v.AuxInt != 0 {
c := v_0.AuxInt
val := v.Args[1]
mem := v.Args[2]
- if !(sym == nil) {
+ if !(sym == nil && !config.nacl) {
break
}
v.reset(OpARMMOVWstoreshiftRA)
return true
}
// match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0
+ // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0 && !config.noDuffDevice
// result: (DUFFCOPY [8 * (128 - int64(SizeAndAlign(s).Size()/4))] dst src mem)
for {
s := v.AuxInt
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0) {
+ if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0 && !config.noDuffDevice) {
break
}
v.reset(OpARMDUFFCOPY)
return true
}
// match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 512 && SizeAndAlign(s).Align()%4 == 0
+ // cond: SizeAndAlign(s).Size()%4 == 0 && (SizeAndAlign(s).Size() > 512 || config.noDuffDevice) && SizeAndAlign(s).Align()%4 == 0
// result: (LoweredMove dst src (ADDconst <src.Type> src [SizeAndAlign(s).Size()]) mem)
for {
s := v.AuxInt
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 512 && SizeAndAlign(s).Align()%4 == 0) {
+ if !(SizeAndAlign(s).Size()%4 == 0 && (SizeAndAlign(s).Size() > 512 || config.noDuffDevice) && SizeAndAlign(s).Align()%4 == 0) {
break
}
v.reset(OpARMLoweredMove)
return true
}
// match: (Zero [s] ptr mem)
- // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0
+ // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0 && !config.noDuffDevice
// result: (DUFFZERO [4 * (128 - int64(SizeAndAlign(s).Size()/4))] ptr (MOVWconst [0]) mem)
for {
s := v.AuxInt
ptr := v.Args[0]
mem := v.Args[1]
- if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0) {
+ if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0 && !config.noDuffDevice) {
break
}
v.reset(OpARMDUFFZERO)
return true
}
// match: (Zero [s] ptr mem)
- // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 512 && SizeAndAlign(s).Align()%4 == 0
+ // cond: SizeAndAlign(s).Size()%4 == 0 && (SizeAndAlign(s).Size() > 512 || config.noDuffDevice) && SizeAndAlign(s).Align()%4 == 0
// result: (LoweredZero ptr (ADDconst <ptr.Type> ptr [SizeAndAlign(s).Size()]) (MOVWconst [0]) mem)
for {
s := v.AuxInt
ptr := v.Args[0]
mem := v.Args[1]
- if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 512 && SizeAndAlign(s).Align()%4 == 0) {
+ if !(SizeAndAlign(s).Size()%4 == 0 && (SizeAndAlign(s).Size() > 512 || config.noDuffDevice) && SizeAndAlign(s).Align()%4 == 0) {
break
}
v.reset(OpARMLoweredZero)
-// +build !amd64,!arm nacl,arm
+// +build !amd64,!arm
// errorcheck -0 -l -live -wb=0
// Copyright 2014 The Go Authors. All rights reserved.
-// +build amd64 arm,!nacl
+// +build amd64 arm
// errorcheck -0 -l -live -wb=0
// Copyright 2014 The Go Authors. All rights reserved.
// Fails on ppc64x because of incomplete optimization.
// See issue 9058.
// Same reason for mips64x and s390x.
-// +build !ppc64,!ppc64le,!mips64,!mips64le,!amd64,!s390x,!arm nacl,arm
+// +build !ppc64,!ppc64le,!mips64,!mips64le,!amd64,!s390x,!arm
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// errorcheck -0 -d=nil
-// +build amd64 arm,!nacl
+// +build amd64 arm
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
-// +build !amd64,!arm nacl,arm
+// +build !amd64,!arm
// errorcheck -0 -d=append,slice
// Copyright 2015 The Go Authors. All rights reserved.