)
type Sig struct {
- name string
- pkg *types.Pkg
- isym *types.Sym
- tsym *types.Sym
- type_ *types.Type
- mtype *types.Type
- offset int32
- }
-
- // siglt sorts method signatures by name, then package path.
- func siglt(a, b *Sig) bool {
- if a.name != b.name {
- return a.name < b.name
- }
- if a.pkg == b.pkg {
- return false
- }
- if a.pkg == nil {
- return true
- }
- if b.pkg == nil {
- return false
- }
- return a.pkg.Path < b.pkg.Path
+ // name is the method's symbol; a *types.Sym carries both the method
+ // name and its package, replacing the old separate name/pkg fields
+ // (and making siglt redundant — syms sort via Sym.Less).
+ name *types.Sym
+ isym *types.Sym // wrapper symbol built from the interface type (see methodSym(it, ...))
+ tsym *types.Sym // wrapper symbol built from the concrete type (see methodSym(t, ...))
+ type_ *types.Type // method signature including the receiver (methodfunc(f.Type, t))
+ mtype *types.Type // method signature without the receiver (methodfunc(f.Type, nil))
}
// Builds a type representing a Bucket structure for
// the given map type. This type is not visible to users -
// we include only enough information to generate a correct GC
// program for it.
- // Make sure this stays in sync with ../../../../runtime/hashmap.go!
+ // Make sure this stays in sync with runtime/map.go.
const (
BUCKETSIZE = 8
MAXKEYSIZE = 128
bucket := types.New(TSTRUCT)
keytype := t.Key()
- valtype := t.Val()
+ valtype := t.Elem()
dowidth(keytype)
dowidth(valtype)
if keytype.Width > MAXKEYSIZE {
// buckets can be marked as having no pointers.
// Arrange for the bucket to have no pointers by changing
// the type of the overflow field to uintptr in this case.
- // See comment on hmap.overflow in ../../../../runtime/hashmap.go.
+ // See comment on hmap.overflow in runtime/map.go.
otyp := types.NewPtr(bucket)
if !types.Haspointers(valtype) && !types.Haspointers(keytype) {
otyp = types.Types[TUINTPTR]
if t.Key().Width > MAXKEYSIZE && !keytype.IsPtr() {
Fatalf("key indirect incorrect for %v", t)
}
- if t.Val().Width > MAXVALSIZE && !valtype.IsPtr() {
+ if t.Elem().Width > MAXVALSIZE && !valtype.IsPtr() {
Fatalf("value indirect incorrect for %v", t)
}
if keytype.Width%int64(keytype.Align) != 0 {
}
// hmap builds a type representing a Hmap structure for the given map type.
- // Make sure this stays in sync with ../../../../runtime/hashmap.go.
+ // Make sure this stays in sync with runtime/map.go.
func hmap(t *types.Type) *types.Type {
if t.MapType().Hmap != nil {
return t.MapType().Hmap
// nevacuate uintptr
// extra unsafe.Pointer // *mapextra
// }
- // must match ../../../../runtime/hashmap.go:hmap.
+ // must match runtime/map.go:hmap.
fields := []*types.Field{
makefield("count", types.Types[TINT]),
makefield("flags", types.Types[TUINT8]),
}
// hiter builds a type representing an Hiter structure for the given map type.
- // Make sure this stays in sync with ../../../../runtime/hashmap.go.
+ // Make sure this stays in sync with runtime/map.go.
func hiter(t *types.Type) *types.Type {
if t.MapType().Hiter != nil {
return t.MapType().Hiter
// bucket uintptr
// checkBucket uintptr
// }
- // must match ../../../../runtime/hashmap.go:hiter.
+ // must match runtime/map.go:hiter.
fields := []*types.Field{
- makefield("key", types.NewPtr(t.Key())), // Used in range.go for TMAP.
- makefield("val", types.NewPtr(t.Val())), // Used in range.go for TMAP.
+ makefield("key", types.NewPtr(t.Key())), // Used in range.go for TMAP.
+ makefield("val", types.NewPtr(t.Elem())), // Used in range.go for TMAP.
makefield("t", types.Types[TUNSAFEPTR]),
makefield("h", types.NewPtr(hmap)),
makefield("buckets", types.NewPtr(bmap)),
func methodfunc(f *types.Type, receiver *types.Type) *types.Type {
var in []*Node
if receiver != nil {
- d := nod(ODCLFIELD, nil, nil)
- d.Type = receiver
+ d := anonfield(receiver)
in = append(in, d)
}
- var d *Node
for _, t := range f.Params().Fields().Slice() {
- d = nod(ODCLFIELD, nil, nil)
- d.Type = t.Type
+ d := anonfield(t.Type)
d.SetIsddd(t.Isddd())
in = append(in, d)
}
var out []*Node
for _, t := range f.Results().Fields().Slice() {
- d = nod(ODCLFIELD, nil, nil)
- d.Type = t.Type
+ d := anonfield(t.Type)
out = append(out, d)
}
method := f.Sym
if method == nil {
- continue
+ break
}
// get receiver type for this particular method.
// if pointer receiver but non-pointer t and
// this is not an embedded pointer inside a struct,
// method does not apply.
- this := f.Type.Recv().Type
-
- if this.IsPtr() && this.Elem() == t {
- continue
- }
- if this.IsPtr() && !t.IsPtr() && f.Embedded != 2 && !isifacemethod(f.Type) {
+ if !isMethodApplicable(t, f) {
continue
}
- var sig Sig
- ms = append(ms, &sig)
-
- sig.name = method.Name
- if !exportname(method.Name) {
- if method.Pkg == nil {
- Fatalf("methods: missing package")
- }
- sig.pkg = method.Pkg
+ sig := &Sig{
+ name: method,
+ isym: methodSym(it, method),
+ tsym: methodSym(t, method),
+ type_: methodfunc(f.Type, t),
+ mtype: methodfunc(f.Type, nil),
}
+ ms = append(ms, sig)
- sig.isym = methodsym(method, it, true)
- sig.tsym = methodsym(method, t, false)
- sig.type_ = methodfunc(f.Type, t)
- sig.mtype = methodfunc(f.Type, nil)
+ this := f.Type.Recv().Type
if !sig.isym.Siggen() {
sig.isym.SetSiggen(true)
- if !eqtype(this, it) || this.Width < int64(Widthptr) {
+ if !eqtype(this, it) {
compiling_wrappers = true
- genwrapper(it, f, sig.isym, true)
+ genwrapper(it, f, sig.isym)
compiling_wrappers = false
}
}
sig.tsym.SetSiggen(true)
if !eqtype(this, t) {
compiling_wrappers = true
- genwrapper(t, f, sig.tsym, false)
+ genwrapper(t, f, sig.tsym)
compiling_wrappers = false
}
}
}
- obj.SortSlice(ms, func(i, j int) bool { return siglt(ms[i], ms[j]) })
return ms
}
if f.Type.Etype != TFUNC || f.Sym == nil {
continue
}
- method := f.Sym
- var sig = Sig{
- name: method.Name,
- }
- if !exportname(method.Name) {
- if method.Pkg == nil {
- Fatalf("imethods: missing package")
- }
- sig.pkg = method.Pkg
+ if f.Sym.IsBlank() {
+ Fatalf("unexpected blank symbol in interface method set")
}
-
- sig.mtype = f.Type
- sig.offset = 0
- sig.type_ = methodfunc(f.Type, nil)
-
if n := len(methods); n > 0 {
last := methods[n-1]
- if !(siglt(last, &sig)) {
- Fatalf("sigcmp vs sortinter %s %s", last.name, sig.name)
+ if !last.name.Less(f.Sym) {
+ Fatalf("sigcmp vs sortinter %v %v", last.name, f.Sym)
}
}
- methods = append(methods, &sig)
- // Compiler can only refer to wrappers for non-blank methods.
- if method.IsBlank() {
- continue
+ sig := &Sig{
+ name: f.Sym,
+ mtype: f.Type,
+ type_: methodfunc(f.Type, nil),
}
+ methods = append(methods, sig)
// NOTE(rsc): Perhaps an oversight that
// IfaceType.Method is not in the reflect data.
// Generate the method body, so that compiled
// code can refer to it.
- isym := methodsym(method, t, false)
+ isym := methodSym(t, f.Sym)
if !isym.Siggen() {
isym.SetSiggen(true)
- genwrapper(t, f, isym, false)
+ genwrapper(t, f, isym)
}
}
// Every package that imports this one directly defines the symbol.
// See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
ns := Ctxt.Lookup(`type..importpath."".`)
- return dsymptrOff(s, ot, ns, 0)
+ return dsymptrOff(s, ot, ns)
}
dimportpath(pkg)
- return dsymptrOff(s, ot, pkg.Pathsym, 0)
+ return dsymptrOff(s, ot, pkg.Pathsym)
}
// dnameField dumps a reflect.name for a struct field.
func dnameField(lsym *obj.LSym, ot int, spkg *types.Pkg, ft *types.Field) int {
- if !exportname(ft.Sym.Name) && ft.Sym.Pkg != spkg {
+ if !types.IsExported(ft.Sym.Name) && ft.Sym.Pkg != spkg {
Fatalf("package mismatch for %v", ft.Sym)
}
- nsym := dname(ft.Sym.Name, ft.Note, nil, exportname(ft.Sym.Name))
+ nsym := dname(ft.Sym.Name, ft.Note, nil, types.IsExported(ft.Sym.Name))
return dsymptr(lsym, ot, nsym, 0)
}
if mcount != int(uint16(mcount)) {
Fatalf("too many methods on %v: %d", t, mcount)
}
+ xcount := sort.Search(mcount, func(i int) bool { return !types.IsExported(m[i].name.Name) })
if dataAdd != int(uint32(dataAdd)) {
Fatalf("methods are too far away on %v: %d", t, dataAdd)
}
ot = duint16(lsym, ot, uint16(mcount))
- ot = duint16(lsym, ot, 0)
+ ot = duint16(lsym, ot, uint16(xcount))
ot = duint32(lsym, ot, uint32(dataAdd))
ot = duint32(lsym, ot, 0)
return ot
func dextratypeData(lsym *obj.LSym, ot int, t *types.Type) int {
for _, a := range methods(t) {
// ../../../../runtime/type.go:/method
- exported := exportname(a.name)
+ exported := types.IsExported(a.name.Name)
var pkg *types.Pkg
- if !exported && a.pkg != typePkg(t) {
- pkg = a.pkg
+ if !exported && a.name.Pkg != typePkg(t) {
+ pkg = a.name.Pkg
}
- nsym := dname(a.name, "", pkg, exported)
+ nsym := dname(a.name.Name, "", pkg, exported)
- ot = dsymptrOff(lsym, ot, nsym, 0)
+ ot = dsymptrOff(lsym, ot, nsym)
ot = dmethodptrOff(lsym, ot, dtypesym(a.mtype))
ot = dmethodptrOff(lsym, ot, a.isym.Linksym())
ot = dmethodptrOff(lsym, ot, a.tsym.Linksym())
case TINTER:
// struct { Itab *tab; void *data; } or
// struct { Type *type; void *data; }
+ // Note: see comment in plive.go:onebitwalktype1.
return 2 * int64(Widthptr)
case TSLICE:
)
// dcommontype dumps the contents of a reflect.rtype (runtime._type).
- func dcommontype(lsym *obj.LSym, ot int, t *types.Type) int {
- if ot != 0 {
- Fatalf("dcommontype %d", ot)
- }
-
+ func dcommontype(lsym *obj.LSym, t *types.Type) int {
sizeofAlg := 2 * Widthptr
if algarray == nil {
algarray = sysfunc("algarray")
// str nameOff
// ptrToThis typeOff
// }
+ ot := 0
ot = duintptr(lsym, ot, uint64(t.Width))
ot = duintptr(lsym, ot, uint64(ptrdata))
ot = duint32(lsym, ot, typehash(t))
p = "*" + p
tflag |= tflagExtraStar
if t.Sym != nil {
- exported = exportname(t.Sym.Name)
+ exported = types.IsExported(t.Sym.Name)
}
} else {
if t.Elem() != nil && t.Elem().Sym != nil {
- exported = exportname(t.Elem().Sym.Name)
+ exported = types.IsExported(t.Elem().Sym.Name)
}
}
ot = dsymptr(lsym, ot, gcsym, 0) // gcdata
nsym := dname(p, "", nil, exported)
- ot = dsymptrOff(lsym, ot, nsym, 0) // str
+ ot = dsymptrOff(lsym, ot, nsym) // str
// ptrToThis
if sptr == nil {
ot = duint32(lsym, ot, 0)
} else if sptrWeak {
ot = dsymptrWeakOff(lsym, ot, sptr)
} else {
- ot = dsymptrOff(lsym, ot, sptr, 0)
+ ot = dsymptrOff(lsym, ot, sptr)
}
return ot
ot := 0
switch t.Etype {
default:
- ot = dcommontype(lsym, ot, t)
+ ot = dcommontype(lsym, t)
ot = dextratype(lsym, ot, t, 0)
case TARRAY:
s1 := dtypesym(t.Elem())
t2 := types.NewSlice(t.Elem())
s2 := dtypesym(t2)
- ot = dcommontype(lsym, ot, t)
+ ot = dcommontype(lsym, t)
ot = dsymptr(lsym, ot, s1, 0)
ot = dsymptr(lsym, ot, s2, 0)
ot = duintptr(lsym, ot, uint64(t.NumElem()))
case TSLICE:
// ../../../../runtime/type.go:/sliceType
s1 := dtypesym(t.Elem())
- ot = dcommontype(lsym, ot, t)
+ ot = dcommontype(lsym, t)
ot = dsymptr(lsym, ot, s1, 0)
ot = dextratype(lsym, ot, t, 0)
case TCHAN:
// ../../../../runtime/type.go:/chanType
s1 := dtypesym(t.Elem())
- ot = dcommontype(lsym, ot, t)
+ ot = dcommontype(lsym, t)
ot = dsymptr(lsym, ot, s1, 0)
ot = duintptr(lsym, ot, uint64(t.ChanDir()))
ot = dextratype(lsym, ot, t, 0)
dtypesym(t1.Type)
}
- ot = dcommontype(lsym, ot, t)
+ ot = dcommontype(lsym, t)
inCount := t.NumRecvs() + t.NumParams()
outCount := t.NumResults()
if isddd {
}
// ../../../../runtime/type.go:/interfaceType
- ot = dcommontype(lsym, ot, t)
+ ot = dcommontype(lsym, t)
var tpkg *types.Pkg
if t.Sym != nil && t != types.Types[t.Etype] && t != types.Errortype {
for _, a := range m {
// ../../../../runtime/type.go:/imethod
- exported := exportname(a.name)
+ exported := types.IsExported(a.name.Name)
var pkg *types.Pkg
- if !exported && a.pkg != tpkg {
- pkg = a.pkg
+ if !exported && a.name.Pkg != tpkg {
+ pkg = a.name.Pkg
}
- nsym := dname(a.name, "", pkg, exported)
+ nsym := dname(a.name.Name, "", pkg, exported)
- ot = dsymptrOff(lsym, ot, nsym, 0)
- ot = dsymptrOff(lsym, ot, dtypesym(a.type_), 0)
+ ot = dsymptrOff(lsym, ot, nsym)
+ ot = dsymptrOff(lsym, ot, dtypesym(a.type_))
}
// ../../../../runtime/type.go:/mapType
case TMAP:
s1 := dtypesym(t.Key())
- s2 := dtypesym(t.Val())
+ s2 := dtypesym(t.Elem())
s3 := dtypesym(bmap(t))
- s4 := dtypesym(hmap(t))
- ot = dcommontype(lsym, ot, t)
+ ot = dcommontype(lsym, t)
ot = dsymptr(lsym, ot, s1, 0)
ot = dsymptr(lsym, ot, s2, 0)
ot = dsymptr(lsym, ot, s3, 0)
- ot = dsymptr(lsym, ot, s4, 0)
if t.Key().Width > MAXKEYSIZE {
ot = duint8(lsym, ot, uint8(Widthptr))
ot = duint8(lsym, ot, 1) // indirect
ot = duint8(lsym, ot, 0) // not indirect
}
- if t.Val().Width > MAXVALSIZE {
+ if t.Elem().Width > MAXVALSIZE {
ot = duint8(lsym, ot, uint8(Widthptr))
ot = duint8(lsym, ot, 1) // indirect
} else {
- ot = duint8(lsym, ot, uint8(t.Val().Width))
+ ot = duint8(lsym, ot, uint8(t.Elem().Width))
ot = duint8(lsym, ot, 0) // not indirect
}
case TPTR32, TPTR64:
if t.Elem().Etype == TANY {
// ../../../../runtime/type.go:/UnsafePointerType
- ot = dcommontype(lsym, ot, t)
+ ot = dcommontype(lsym, t)
ot = dextratype(lsym, ot, t, 0)
break
// ../../../../runtime/type.go:/ptrType
s1 := dtypesym(t.Elem())
- ot = dcommontype(lsym, ot, t)
+ ot = dcommontype(lsym, t)
ot = dsymptr(lsym, ot, s1, 0)
ot = dextratype(lsym, ot, t, 0)
// for security, only the exported fields.
case TSTRUCT:
fields := t.Fields().Slice()
+
+ // omitFieldForAwfulBoringCryptoKludge reports whether
+ // the field t should be omitted from the reflect data.
+ // In the crypto/... packages we omit an unexported field
+ // named "boring", to keep from breaking client code that
+ // expects rsa.PublicKey etc to have only public fields.
+ // As the name suggests, this is an awful kludge, but it is
+ // limited to the dev.boringcrypto branch and avoids
+ // much more invasive effects elsewhere.
+ omitFieldForAwfulBoringCryptoKludge := func(t *types.Field) bool {
+ if t.Sym == nil || t.Sym.Name != "boring" || t.Sym.Pkg == nil {
+ return false
+ }
+ path := t.Sym.Pkg.Path
+ if t.Sym.Pkg == localpkg {
+ path = myimportpath
+ }
+ return strings.HasPrefix(path, "crypto/")
+ }
+ newFields := fields[:0:0]
+ for _, t1 := range fields {
+ if !omitFieldForAwfulBoringCryptoKludge(t1) {
+ newFields = append(newFields, t1)
+ }
+ }
+ fields = newFields
+
for _, t1 := range fields {
dtypesym(t1.Type)
}
// information from the field descriptors.
var spkg *types.Pkg
for _, f := range fields {
- if !exportname(f.Sym.Name) {
+ if !types.IsExported(f.Sym.Name) {
spkg = f.Sym.Pkg
break
}
}
- ot = dcommontype(lsym, ot, t)
+ ot = dcommontype(lsym, t)
ot = dgopkgpath(lsym, ot, spkg)
ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t))
ot = duintptr(lsym, ot, uint64(len(fields)))
sigs := imethods(it)
methods := methods(t)
out := make([]*obj.LSym, 0, len(sigs))
+ // TODO(mdempsky): Short circuit before calling methods(t)?
+ // See discussion on CL 105039.
if len(sigs) == 0 {
return nil
}
}
}
+ if len(sigs) != 0 {
+ Fatalf("incomplete itab")
+ }
+
return out
}
return syms[methodnum]
}
+ // addsignat ensures that a runtime type descriptor is emitted for t.
func addsignat(t *types.Type) {
signatset[t] = struct{}{}
}
// typ typeOff // pointer to symbol
// }
nsym := dname(p.s.Name, "", nil, true)
- ot = dsymptrOff(s, ot, nsym, 0)
- ot = dsymptrOff(s, ot, dtypesym(p.t), 0)
+ ot = dsymptrOff(s, ot, nsym)
+ ot = dsymptrOff(s, ot, dtypesym(p.t))
}
ggloblsym(s, int32(ot), int16(obj.RODATA))
p.w.Ptr(offset / int64(Widthptr))
case TINTER:
- p.w.Ptr(offset / int64(Widthptr))
+ // Note: the first word isn't a pointer. See comment in plive.go:onebitwalktype1.
p.w.Ptr(offset/int64(Widthptr) + 1)
case TSLICE:
skipExternal = false // skip external tests
)
+ // tooSlow skips the calling test when -short is set, except on the
+ // {darwin,linux,windows}/amd64 builders, which are fast enough to run
+ // the full suite even in -short mode.
+ func tooSlow(t *testing.T) {
+ if testing.Short() {
+ // In -short mode; skip test, except run it on the {darwin,linux,windows}/amd64 builders.
+ if testenv.Builder() != "" && runtime.GOARCH == "amd64" && (runtime.GOOS == "linux" || runtime.GOOS == "darwin" || runtime.GOOS == "windows") {
+ return
+ }
+ t.Skip("skipping test in -short mode")
+ }
+ }
+
func init() {
switch runtime.GOOS {
- case "android", "nacl":
+ case "android", "js", "nacl":
canRun = false
case "darwin":
switch runtime.GOARCH {
skipExternal = true
canRun = false
}
+ case "plan9":
+ switch runtime.GOARCH {
+ case "arm":
+ // many plan9/arm machines are too slow to run
+ // the full set of external tests.
+ skipExternal = true
+ }
case "windows":
exeSuffix = ".exe"
}
fmt.Printf("SKIP\n")
return
}
+ os.Unsetenv("GOROOT_FINAL")
if canRun {
args := []string{"build", "-tags", "testgo", "-o", "testgo" + exeSuffix}
case "linux", "darwin", "freebsd", "windows":
// The race detector doesn't work on Alpine Linux:
// golang.org/issue/14481
- canRace = canCgo && runtime.GOARCH == "amd64" && !isAlpineLinux()
+ canRace = canCgo && runtime.GOARCH == "amd64" && !isAlpineLinux() && runtime.Compiler != "gccgo"
}
}
// Don't let these environment variables confuse the test.
stdout, stderr bytes.Buffer
}
+ // skipIfGccgo skips the test if using gccgo.
+ // msg explains why the test does not apply to gccgo and is included
+ // in the skip message.
+ func skipIfGccgo(t *testing.T, msg string) {
+ if runtime.Compiler == "gccgo" {
+ t.Skipf("skipping test not supported on gccgo: %s", msg)
+ }
+ }
+
// testgo sets up for a test that runs testgo.
func testgo(t *testing.T) *testgoData {
t.Helper()
testenv.MustHaveGoBuild(t)
if skipExternal {
- t.Skip("skipping external tests on %s/%s", runtime.GOOS, runtime.GOARCH)
+ t.Skipf("skipping external tests on %s/%s", runtime.GOOS, runtime.GOARCH)
}
return &testgoData{t: t}
}
func TestProgramNameInCrashMessages(t *testing.T) {
+ skipIfGccgo(t, "gccgo does not use cmd/link")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
}
}
- tg.tempFile("d1/src/p1/p1.go", `package p1`)
+ // Every main package depends on the "runtime".
+ tg.tempFile("d1/src/p1/p1.go", `package main; func main(){}`)
tg.setenv("GOPATH", tg.path("d1"))
- tg.run("install", "-a", "p1")
+ // Pass -i flag to rebuild everything outdated.
+ tg.run("install", "-i", "p1")
tg.wantNotStale("p1", "", "./testgo list claims p1 is stale, incorrectly, before any changes")
// Changing mtime of runtime/internal/sys/sys.go
tg.wantNotStale("p1", "", "./testgo list claims p1 is stale, incorrectly, after changing back to old release")
addNL(sys)
tg.wantStale("p1", "stale dependency: runtime/internal/sys", "./testgo list claims p1 is NOT stale, incorrectly, after changing sys.go again")
- tg.run("install", "p1")
+ tg.run("install", "-i", "p1")
tg.wantNotStale("p1", "", "./testgo list claims p1 is stale after building with new release")
// Restore to "old" release.
restore()
tg.wantStale("p1", "stale dependency: runtime/internal/sys", "./testgo list claims p1 is NOT stale, incorrectly, after restoring sys.go")
- tg.run("install", "p1")
+ tg.run("install", "-i", "p1")
tg.wantNotStale("p1", "", "./testgo list claims p1 is stale after building with old release")
// Everything is out of date. Rebuild to leave things in a better state.
}
func TestGoListStandard(t *testing.T) {
+ skipIfGccgo(t, "gccgo does not have GOROOT")
+ tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
// TODO: tg.parallel()
}
func TestGoInstallCleansUpAfterGoBuild(t *testing.T) {
+ tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
// TODO: tg.parallel()
}
func TestGoInstallRebuildsStalePackagesInOtherGOPATH(t *testing.T) {
+ tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
}
func TestGoInstallDetectsRemovedFilesInPackageMain(t *testing.T) {
+ tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
}
func TestInternalPackagesInGOROOTAreRespected(t *testing.T) {
+ skipIfGccgo(t, "gccgo does not have GOROOT")
tg := testgo(t)
defer tg.cleanup()
tg.runFail("build", "-v", "./testdata/testinternal")
tg.grepStderr(`testdata(\/|\\)src(\/|\\)run(\/|\\)bad\.go\:3\:8\: use of internal package not allowed`, "unexpected error for run/bad.go")
}
+ // TestRunPkg checks that "go run" accepts a package import path
+ // ("hello") as well as the current directory (".").
+ func TestRunPkg(t *testing.T) {
+ tg := testgo(t)
+ defer tg.cleanup()
+ dir := filepath.Join(tg.pwd(), "testdata")
+ tg.setenv("GOPATH", dir)
+ tg.run("run", "hello")
+ tg.grepStderr("hello, world", "did not find hello, world")
+ tg.cd(filepath.Join(dir, "src/hello"))
+ tg.run("run", ".")
+ tg.grepStderr("hello, world", "did not find hello, world")
+ }
+
func testMove(t *testing.T, vcs, url, base, config string) {
testenv.MustHaveExternalNetwork(t)
tg.grepStderr("found import comments", "go build did not mention comment conflict")
}
+ // TestImportCycle verifies that building a self-importing package
+ // reports "import cycle not allowed" exactly once, and that
+ // "go list -e" does not hang on the cycle.
+ func TestImportCycle(t *testing.T) {
+ tg := testgo(t)
+ defer tg.cleanup()
+ tg.parallel()
+ tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata/importcycle"))
+ tg.runFail("build", "selfimport")
+
+ count := tg.grepCountBoth("import cycle not allowed")
+ if count == 0 {
+ t.Fatal("go build did not mention cyclical import")
+ }
+ if count > 1 {
+ t.Fatal("go build mentioned import cycle more than once")
+ }
+
+ // Don't hang forever.
+ tg.run("list", "-e", "-json", "selfimport")
+ }
+
// cmd/go: custom import path checking should not apply to Go packages without import comment.
func TestIssue10952(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
}
func TestPackageMainTestImportsArchiveNotBinary(t *testing.T) {
+ tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.run("test", "main_test")
}
+ // TestPackageMainTestCompilerFlags checks that when compiling the test
+ // for a package main, the compiler is invoked with package path p1
+ // (-p p1 / -fgo-pkgpath=p1), not with -p main.
+ func TestPackageMainTestCompilerFlags(t *testing.T) {
+ tg := testgo(t)
+ defer tg.cleanup()
+ tg.parallel()
+ tg.makeTempdir()
+ tg.setenv("GOPATH", tg.path("."))
+ tg.tempFile("src/p1/p1.go", "package main\n")
+ tg.tempFile("src/p1/p1_test.go", "package main\nimport \"testing\"\nfunc Test(t *testing.T){}\n")
+ tg.run("test", "-c", "-n", "p1")
+ tg.grepBothNot(`([\\/]compile|gccgo).* (-p main|-fgo-pkgpath=main).*p1\.go`, "should not have run compile -p main p1.go")
+ tg.grepStderr(`([\\/]compile|gccgo).* (-p p1|-fgo-pkgpath=p1).*p1\.go`, "should have run compile -p p1 p1.go")
+ }
+
// The runtime version string takes one of two forms:
// "go1.X[.Y]" for Go releases, and "devel +hash" at tip.
// Determine whether we are in a released copy by
// Issue 12690
func TestPackageNotStaleWithTrailingSlash(t *testing.T) {
+ skipIfGccgo(t, "gccgo does not have GOROOT")
tg := testgo(t)
defer tg.cleanup()
// Issue 4104.
func TestGoTestWithPackageListedMultipleTimes(t *testing.T) {
+ tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
}
func TestGoListHasAConsistentOrder(t *testing.T) {
+ tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
}
func TestGoListStdDoesNotIncludeCommands(t *testing.T) {
+ tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
}
func TestGoListCmdOnlyShowsCommands(t *testing.T) {
+ skipIfGccgo(t, "gccgo does not have GOROOT")
+ tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("src/p1/p2/p3/p4/p.go", "package p4\n")
tg.run("list", "-f", "{{.Deps}}", "p1")
tg.grepStdout("p1/p2/p3/p4", "Deps(p1) does not mention p4")
+
+ tg.run("list", "-deps", "p1")
+ tg.grepStdout("p1/p2/p3/p4", "-deps p1 does not mention p4")
+
+ // Check the list is in dependency order.
+ tg.run("list", "-deps", "math")
+ want := "internal/cpu\nunsafe\nmath\n"
+ out := tg.stdout.String()
+ if !strings.Contains(out, "internal/cpu") {
+ // Some systems don't use internal/cpu.
+ want = "unsafe\nmath\n"
+ }
+ if tg.stdout.String() != want {
+ t.Fatalf("list -deps math: wrong order\nhave %q\nwant %q", tg.stdout.String(), want)
+ }
+ }
+
+ // TestGoListTest exercises "go list -test": with -deps it must report
+ // the synthesized test main (sort.test) plus the bracketed test copies
+ // of packages (e.g. "sort [sort.test]"); without -deps only the test
+ // main and the real package appear, not the test copies.
+ func TestGoListTest(t *testing.T) {
+ tg := testgo(t)
+ defer tg.cleanup()
+ tg.parallel()
+ tg.makeTempdir()
+ tg.setenv("GOCACHE", tg.tempdir)
+
+ tg.run("list", "-test", "-deps", "sort")
+ tg.grepStdout(`^sort.test$`, "missing test main")
+ tg.grepStdout(`^sort$`, "missing real sort")
+ tg.grepStdout(`^sort \[sort.test\]$`, "missing test copy of sort")
+ tg.grepStdout(`^testing \[sort.test\]$`, "missing test copy of testing")
+ tg.grepStdoutNot(`^testing$`, "unexpected real copy of testing")
+
+ tg.run("list", "-test", "sort")
+ tg.grepStdout(`^sort.test$`, "missing test main")
+ tg.grepStdout(`^sort$`, "missing real sort")
+ tg.grepStdoutNot(`^sort \[sort.test\]$`, "unexpected test copy of sort")
+ tg.grepStdoutNot(`^testing \[sort.test\]$`, "unexpected test copy of testing")
+ tg.grepStdoutNot(`^testing$`, "unexpected real copy of testing")
+
+ // cmd/dist has no test files, so no cmd/dist.test is listed.
+ tg.run("list", "-test", "cmd/dist", "cmd/doc")
+ tg.grepStdout(`^cmd/dist$`, "missing cmd/dist")
+ tg.grepStdout(`^cmd/doc$`, "missing cmd/doc")
+ tg.grepStdout(`^cmd/doc\.test$`, "missing cmd/doc test")
+ tg.grepStdoutNot(`^cmd/dist\.test$`, "unexpected cmd/dist test")
+ tg.grepStdoutNot(`^testing`, "unexpected testing")
+
+ tg.run("list", "-test", "runtime/cgo")
+ tg.grepStdout(`^runtime/cgo$`, "missing runtime/cgo")
+ }
// Issue 4096. Validate the output of unsuccessful go install foo/quxx.
}
func TestLdflagsArgumentsWithSpacesIssue3941(t *testing.T) {
+ skipIfGccgo(t, "gccgo does not support -ldflags -X")
+ tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
}
func TestGoTestCpuprofileLeavesBinaryBehind(t *testing.T) {
+ skipIfGccgo(t, "gccgo has no standard packages")
+ tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
// TODO: tg.parallel()
}
func TestGoTestCpuprofileDashOControlsBinaryLocation(t *testing.T) {
+ tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
// TODO: tg.parallel()
}
func TestGoTestMutexprofileLeavesBinaryBehind(t *testing.T) {
+ skipIfGccgo(t, "gccgo has no standard packages")
+ tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
// TODO: tg.parallel()
}
func TestGoTestMutexprofileDashOControlsBinaryLocation(t *testing.T) {
+ skipIfGccgo(t, "gccgo has no standard packages")
+ tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
// TODO: tg.parallel()
}
func TestGoTestDashCDashOControlsBinaryLocation(t *testing.T) {
+ skipIfGccgo(t, "gccgo has no standard packages")
+ tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
}
func TestGoTestDashOWritesBinary(t *testing.T) {
+ tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
}
func TestGoTestDashIDashOWritesBinary(t *testing.T) {
+ skipIfGccgo(t, "gccgo has no standard packages")
+ tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
// Issue 4515.
func TestInstallWithTags(t *testing.T) {
+ tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempDir("yy/zz")
tg.tempFile("yy/zz/zz.go", "package zz\n")
if err := os.Symlink(tg.path("yy"), tg.path("src/example/xx/yy")); err != nil {
- t.Skip("symlink failed: %v", err)
+ t.Skipf("symlink failed: %v", err)
}
tg.run("list", "example/xx/z...")
tg.grepStdoutNot(".", "list should not have matched anything")
}
func TestShadowingLogic(t *testing.T) {
+ skipIfGccgo(t, "gccgo has no standard packages")
tg := testgo(t)
defer tg.cleanup()
pwd := tg.pwd()
}
func TestCoverageRuns(t *testing.T) {
- if testing.Short() {
- t.Skip("don't build libraries for coverage in short mode")
- }
+ tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.run("test", "-short", "-coverpkg=strings", "strings", "regexp")
checkCoverage(tg, data)
}
+ // TestCoverageDotImport checks that -coverpkg works across the
+ // coverdot1/coverdot2 testdata packages (which, per the name,
+ // presumably involve a dot import — see testdata).
+ func TestCoverageDotImport(t *testing.T) {
+ tg := testgo(t)
+ defer tg.cleanup()
+ tg.parallel()
+ tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
+ tg.run("test", "-coverpkg=coverdot1,coverdot2", "coverdot2")
+ data := tg.getStdout() + tg.getStderr()
+ checkCoverage(tg, data)
+ }
+
// Check that coverage analysis uses set mode.
// Also check that coverage profiles merge correctly.
func TestCoverageUsesSetMode(t *testing.T) {
- if testing.Short() {
- t.Skip("don't build libraries for coverage in short mode")
- }
+ tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.creatingTemp("testdata/cover.out")
}
func TestCoverageUsesAtomicModeForRace(t *testing.T) {
- if testing.Short() {
- t.Skip("don't build libraries for coverage in short mode")
- }
+ tooSlow(t)
if !canRace {
t.Skip("skipping because race detector not supported")
}
}
func TestCoverageSyncAtomicImport(t *testing.T) {
+ tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.run("test", "-short", "-cover", "-covermode=atomic", "-coverpkg=coverdep/p1", "coverdep")
}
+ // TestCoverageDepLoop ensures coverage instrumentation correctly
+ // recompiles a package that participates in an xtest import loop.
+ func TestCoverageDepLoop(t *testing.T) {
+ tooSlow(t)
+ tg := testgo(t)
+ defer tg.cleanup()
+ tg.parallel()
+ tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
+ // coverdep2/p1's xtest imports coverdep2/p2 which imports coverdep2/p1.
+ // Make sure that coverage on coverdep2/p2 recompiles coverdep2/p2.
+ tg.run("test", "-short", "-cover", "coverdep2/p1")
+ tg.grepStdout("coverage: 100.0% of statements", "expected 100.0% coverage")
+ }
+
func TestCoverageImportMainLoop(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
}
func TestCoveragePattern(t *testing.T) {
+ tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
// (as opposed to pattern matching on deps)
// then it will try to load sleepybad, which does not compile,
// and the test command will fail.
- tg.run("test", "-coverprofile="+filepath.Join(tg.tempdir, "cover.out"), "-coverpkg=sleepy...", "-run=^$", "sleepy1")
+ tg.run("test", "-coverprofile="+tg.path("cover.out"), "-coverpkg=sleepy...", "-run=^$", "sleepy1")
}
func TestCoverageErrorLine(t *testing.T) {
+ tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
}
func TestTestBuildFailureOutput(t *testing.T) {
+ tooSlow(t)
+
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
}
func TestCoverageFunc(t *testing.T) {
+ tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
tg.run("test", "-outputdir="+tg.tempdir, "-coverprofile=cover.out", "coverasm")
- tg.run("tool", "cover", "-func="+filepath.Join(tg.tempdir, "cover.out"))
+ tg.run("tool", "cover", "-func="+tg.path("cover.out"))
tg.grepStdout(`\tg\t*100.0%`, "did not find g 100% covered")
tg.grepStdoutNot(`\tf\t*[0-9]`, "reported coverage for assembly function f")
}
+ // Issue 24588.
+ // TestCoverageDashC checks that "go test -c -coverprofile" still
+ // produces the test binary; the profile path points into a
+ // nonexistent directory, which must not matter since -c only
+ // compiles and never writes the profile.
+ func TestCoverageDashC(t *testing.T) {
+ tg := testgo(t)
+ defer tg.cleanup()
+ tg.parallel()
+ tg.makeTempdir()
+ tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
+ tg.run("test", "-c", "-o", tg.path("coverdep"), "-coverprofile="+tg.path("no/such/dir/cover.out"), "coverdep")
+ tg.wantExecutable(tg.path("coverdep"), "go -test -c -coverprofile did not create executable")
+ }
+
func TestPluginNonMain(t *testing.T) {
wd, err := os.Getwd()
if err != nil {
if !canRace {
t.Skip("no race detector")
}
- if testing.Short() && testenv.Builder() == "" {
- t.Skip("don't rebuild the standard library in short mode")
- }
+ tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
}
func TestCoverageWithCgo(t *testing.T) {
+ tooSlow(t)
if !canCgo {
t.Skip("skipping because cgo not enabled")
}
}
func TestCgoHandlesWlORIGIN(t *testing.T) {
+ tooSlow(t)
if !canCgo {
t.Skip("skipping because cgo not enabled")
}
defer tg.cleanup()
tg.parallel()
tg.tempFile("src/origin/origin.go", `package origin
- // #cgo !darwin LDFLAGS: -Wl,-rpath -Wl,$ORIGIN
+ // #cgo !darwin LDFLAGS: -Wl,-rpath,$ORIGIN
// void f(void) {}
import "C"
func f() { C.f() }`)
}
func TestCgoPkgConfig(t *testing.T) {
+ tooSlow(t)
if !canCgo {
t.Skip("skipping because cgo not enabled")
}
// "go test -c" should also appear to write a new binary every time,
// even if it's really just updating the mtime on an existing up-to-date binary.
func TestIssue6480(t *testing.T) {
+ skipIfGccgo(t, "gccgo has no standard packages")
+ tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
// TODO: tg.parallel()
}
func TestBuildDashIInstallsDependencies(t *testing.T) {
+ tooSlow(t)
+
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.grepBoth(okPattern, "go test did not say ok")
}
+ // TestGoTestMainTwice runs the multimain testdata package and expects
+ // its marker string "notwithstanding" in the verbose output exactly
+ // twice — i.e. the tests must run twice (see testdata/src/multimain).
+ func TestGoTestMainTwice(t *testing.T) {
+ tg := testgo(t)
+ defer tg.cleanup()
+ tg.makeTempdir()
+ tg.setenv("GOCACHE", tg.tempdir)
+ tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
+ tg.run("test", "-v", "multimain")
+ if strings.Count(tg.getStdout(), "notwithstanding") != 2 {
+ t.Fatal("tests did not run twice")
+ }
+ }
+
func TestGoTestFlagsAfterPackage(t *testing.T) {
+ tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.run("test", "testdata/flag_test.go", "-v", "-args", "-v=7") // Two distinct -v flags.
}
}
+ // TestGoGenerateXTestPkgName checks that $GOPACKAGE inside a
+ // //go:generate directive in an external test file (package main_test)
+ // expands to "main_test", not "main".
+ func TestGoGenerateXTestPkgName(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("skipping because windows has no echo command")
+ }
+
+ tg := testgo(t)
+ defer tg.cleanup()
+ tg.parallel()
+ tg.tempFile("env_test.go", "package main_test\n\n//go:generate echo $GOPACKAGE")
+ tg.run("generate", tg.path("env_test.go"))
+ want := "main_test"
+ if got := strings.TrimSpace(tg.getStdout()); got != want {
+ t.Errorf("go generate in XTest file got package name %q; want %q", got, want)
+ }
+ }
+
func TestGoGenerateBadImports(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("skipping because windows has no echo command")
tg.run("vet", "-printf=false", "vetpkg")
}
+ // Issue 23395: "go vet" must succeed on a package that contains
+ // only test files (no non-test .go files).
+ func TestGoVetWithOnlyTestFiles(t *testing.T) {
+ tg := testgo(t)
+ defer tg.cleanup()
+ tg.parallel()
+ tg.tempFile("src/p/p_test.go", "package p; import \"testing\"; func TestMe(*testing.T) {}")
+ tg.setenv("GOPATH", tg.path("."))
+ tg.run("vet", "p")
+ }
+
+ // Issue 24193: "go vet" must succeed on a package whose only
+ // source file uses cgo (import "C").
+ func TestVetWithOnlyCgoFiles(t *testing.T) {
+ if !canCgo {
+ t.Skip("skipping because cgo not enabled")
+ }
+
+ tg := testgo(t)
+ defer tg.cleanup()
+ tg.parallel()
+ tg.tempFile("src/p/p.go", "package p; import \"C\"; func F() {}")
+ tg.setenv("GOPATH", tg.path("."))
+ tg.run("vet", "p")
+ }
+
// Issue 9767, 19769.
func TestGoGetDotSlashDownload(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
// Test that you cannot import a main package.
// See golang.org/issue/4210 and golang.org/issue/17475.
func TestImportMain(t *testing.T) {
+ tooSlow(t)
+
tg := testgo(t)
tg.parallel()
defer tg.cleanup()
// accessed by a non-local import (found in a GOPATH/GOROOT).
// See golang.org/issue/17475.
func TestImportLocal(t *testing.T) {
+ tooSlow(t)
+
tg := testgo(t)
tg.parallel()
defer tg.cleanup()
}
func TestGoInstallPkgdir(t *testing.T) {
+ skipIfGccgo(t, "gccgo has no standard packages")
+ tooSlow(t)
+
tg := testgo(t)
tg.parallel()
defer tg.cleanup()
}
func TestGoTestRaceFailures(t *testing.T) {
+ tooSlow(t)
+
if !canRace {
t.Skip("skipping because race detector not supported")
}
}
func TestGoBuildOutput(t *testing.T) {
+ skipIfGccgo(t, "gccgo has no standard packages")
+ tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
// For issue 14337.
func TestParallelTest(t *testing.T) {
+ tooSlow(t)
tg := testgo(t)
tg.parallel()
defer tg.cleanup()
}
func TestCgoConsistentResults(t *testing.T) {
+ tooSlow(t)
if !canCgo {
t.Skip("skipping because cgo not enabled")
}
}
func TestBinaryOnlyPackages(t *testing.T) {
+ tooSlow(t)
+
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.grepStdout("false", "did not see BinaryOnly=false for p4")
}
-// Issue 16050.
-func TestAlwaysLinkSysoFiles(t *testing.T) {
+// Issue 16050 and 21884.
+func TestLinkSysoFiles(t *testing.T) {
+ if runtime.GOOS != "linux" || runtime.GOARCH != "amd64" {
+ t.Skip("not linux/amd64")
+ }
+
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.setenv("CGO_ENABLED", "0")
tg.run("list", "-f", "{{.SysoFiles}}", "syso")
tg.grepStdout("a.syso", "missing syso file with CGO_ENABLED=0")
+
+ tg.setenv("CGO_ENABLED", "1")
+ tg.run("list", "-msan", "-f", "{{.SysoFiles}}", "syso")
+ tg.grepStdoutNot("a.syso", "unexpected syso file with -msan")
}
// Issue 16120.
// Issue 18845
func TestBenchTimeout(t *testing.T) {
+ tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.run("test", "-bench", ".", "-timeout", "750ms", "testdata/timeoutbench_test.go")
// Issue 19394
func TestWriteProfilesOnTimeout(t *testing.T) {
+ tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.tempDir("profiling")
func TestLinkXImportPathEscape(t *testing.T) {
// golang.org/issue/16710
+ skipIfGccgo(t, "gccgo does not support -ldflags -X")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
// Issue 18225.
// This is really a cmd/asm issue but this is a convenient place to test it.
func TestConcurrentAsm(t *testing.T) {
+ skipIfGccgo(t, "gccgo does not use cmd/asm")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
// Issue 19198.
// This is really a cmd/link issue but this is a convenient place to test it.
func TestDuplicateGlobalAsmSymbols(t *testing.T) {
+ skipIfGccgo(t, "gccgo does not use cmd/asm")
+ tooSlow(t)
if runtime.GOARCH != "386" && runtime.GOARCH != "amd64" {
t.Skipf("skipping test on %s", runtime.GOARCH)
}
}
`)
tg.setenv("GOPATH", tg.path("go"))
- exe := filepath.Join(tg.tempdir, "p.exe")
+ exe := tg.path("p.exe")
tg.creatingTemp(exe)
tg.run("build", "-o", exe, "p")
}
func TestBuildTagsNoComma(t *testing.T) {
+ skipIfGccgo(t, "gccgo has no standard packages")
tg := testgo(t)
defer tg.cleanup()
tg.makeTempdir()
}
func TestExecutableGOROOT(t *testing.T) {
+ skipIfGccgo(t, "gccgo has no GOROOT")
if runtime.GOOS == "openbsd" {
t.Skipf("test case does not work on %s, missing os.Executable", runtime.GOOS)
}
newRoot := tg.path("new")
t.Run("RelocatedExe", func(t *testing.T) {
- t.Skip("TODO: skipping known broken test; see golang.org/issue/20284")
-
- // Should fall back to default location in binary.
- // No way to dig out other than look at source code.
- data, err := ioutil.ReadFile("../../runtime/internal/sys/zversion.go")
- if err != nil {
- t.Fatal(err)
- }
- m := regexp.MustCompile("var DefaultGoroot = `([^`]+)`").FindStringSubmatch(string(data))
- if m == nil {
- t.Fatal("cannot find DefaultGoroot in ../../runtime/internal/sys/zversion.go")
- }
- check(t, newGoTool, m[1])
+ // Should fall back to default location in binary,
+ // which is the GOROOT we used when building testgo.exe.
+ check(t, newGoTool, testGOROOT)
})
// If the binary is sitting in a bin dir next to ../pkg/tool, that counts as a GOROOT,
tg.must(os.RemoveAll(tg.path("new/pkg")))
// Binaries built in the new tree should report the
- // new tree when they call runtime.GOROOT().
- // This is implemented by having the go tool pass a -X option
- // to the linker setting runtime/internal/sys.DefaultGoroot.
+ // new tree when they call runtime.GOROOT.
t.Run("RuntimeGoroot", func(t *testing.T) {
// Build a working GOROOT the easy way, with symlinks.
testenv.MustHaveSymlink(t)
}
func TestNeedVersion(t *testing.T) {
+ skipIfGccgo(t, "gccgo does not use cmd/compile")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
// Test that user can override default code generation flags.
func TestUserOverrideFlags(t *testing.T) {
+ skipIfGccgo(t, "gccgo does not use -gcflags")
if !canCgo {
t.Skip("skipping because cgo not enabled")
}
}
func TestCgoFlagContainsSpace(t *testing.T) {
+ tooSlow(t)
if !canCgo {
t.Skip("skipping because cgo not enabled")
}
// Issue #20435.
func TestGoTestRaceCoverModeFailures(t *testing.T) {
+ tooSlow(t)
if !canRace {
t.Skip("skipping because race detector not supported")
}
before()
tg.run("install", "mycmd")
after()
- tg.wantStale("mycmd", "stale dependency: runtime/internal/sys", "should be stale after environment variable change")
+ tg.wantStale("mycmd", "stale dependency", "should be stale after environment variable change")
}
}
x_test.go:15: LOG: Y running N=2000000000
--- BENCH: BenchmarkX/Y
x_test.go:15: LOG: Y running N=1
- x_test.go:15: LOG: Y running N=100
- x_test.go:15: LOG: Y running N=10000
- x_test.go:15: LOG: Y running N=1000000
- x_test.go:15: LOG: Y running N=100000000
x_test.go:15: LOG: Y running N=2000000000
--- BENCH: BenchmarkX
x_test.go:13: LOG: X running N=1
}
func TestListTests(t *testing.T) {
+ tooSlow(t)
var tg *testgoData
testWith := func(listName, expected string) func(*testing.T) {
return func(t *testing.T) {
}
func TestBuildmodePIE(t *testing.T) {
- if runtime.Compiler == "gccgo" {
- t.Skipf("skipping test because buildmode=pie is not supported on gccgo")
+ if testing.Short() && testenv.Builder() == "" {
+ t.Skipf("skipping in -short mode on non-builder")
}
platform := fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)
switch platform {
case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le", "linux/s390x",
- "android/amd64", "android/arm", "android/arm64", "android/386":
+ "android/amd64", "android/arm", "android/arm64", "android/386",
+ "freebsd/amd64":
case "darwin/amd64":
default:
t.Skipf("skipping test because buildmode=pie is not supported on %s", platform)
tg.run("build", "-buildmode=pie", "-o", obj, src)
switch runtime.GOOS {
- case "linux", "android":
+ case "linux", "android", "freebsd":
f, err := elf.Open(obj)
if err != nil {
t.Fatal(err)
}
func TestExecBuildX(t *testing.T) {
+ tooSlow(t)
if !canCgo {
t.Skip("skipping because cgo not enabled")
}
tg := testgo(t)
defer tg.cleanup()
+ tg.setenv("GOCACHE", "off")
+
tg.tempFile("main.go", `package main; import "C"; func main() { print("hello") }`)
src := tg.path("main.go")
obj := tg.path("main")
tg.run("build", "-x", "-o", obj, src)
sh := tg.path("test.sh")
- err := ioutil.WriteFile(sh, []byte(tg.getStderr()), 0666)
+ err := ioutil.WriteFile(sh, []byte("set -e\n"+tg.getStderr()), 0666)
if err != nil {
t.Fatal(err)
}
}
func TestParallelNumber(t *testing.T) {
+ tooSlow(t)
for _, n := range [...]string{"-1", "0"} {
t.Run(n, func(t *testing.T) {
tg := testgo(t)
}
func TestWrongGOOSErrorBeforeLoadError(t *testing.T) {
+ skipIfGccgo(t, "gccgo assumes cross-compilation is always possible")
tg := testgo(t)
defer tg.cleanup()
tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
}
func TestUpxCompression(t *testing.T) {
- if runtime.GOOS != "linux" || runtime.GOARCH != "amd64" {
+ if runtime.GOOS != "linux" ||
+ (runtime.GOARCH != "amd64" && runtime.GOARCH != "386") {
t.Skipf("skipping upx test on %s/%s", runtime.GOOS, runtime.GOARCH)
}
}
func TestBuildCache(t *testing.T) {
+ tooSlow(t)
if strings.Contains(os.Getenv("GODEBUG"), "gocacheverify") {
t.Skip("GODEBUG gocacheverify")
}
}
}
+ // TestCacheListStale checks "go list"'s .Stale reporting with the build
+ // cache enabled: after "go install p m", the installed packages p and m
+ // must list as not stale, while their dependency q (which was built but
+ // not installed) must list as stale.
+ func TestCacheListStale(t *testing.T) {
+ tooSlow(t)
+ if strings.Contains(os.Getenv("GODEBUG"), "gocacheverify") {
+ t.Skip("GODEBUG gocacheverify")
+ }
+ tg := testgo(t)
+ defer tg.cleanup()
+ tg.parallel()
+ tg.makeTempdir()
+ tg.setenv("GOCACHE", tg.path("cache"))
+ // p and m both depend on q; only p and m are installed below.
+ tg.tempFile("gopath/src/p/p.go", "package p; import _ \"q\"; func F(){}\n")
+ tg.tempFile("gopath/src/q/q.go", "package q; func F(){}\n")
+ tg.tempFile("gopath/src/m/m.go", "package main; import _ \"q\"; func main(){}\n")
+
+ tg.setenv("GOPATH", tg.path("gopath"))
+ tg.run("install", "p", "m")
+ tg.run("list", "-f={{.ImportPath}} {{.Stale}}", "m", "q", "p")
+ tg.grepStdout("^m false", "m should not be stale")
+ tg.grepStdout("^q true", "q should be stale")
+ tg.grepStdout("^p false", "p should not be stale")
+ }
+
func TestCacheCoverage(t *testing.T) {
+ tooSlow(t)
+
if strings.Contains(os.Getenv("GODEBUG"), "gocacheverify") {
t.Skip("GODEBUG gocacheverify")
}
tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
tg.makeTempdir()
- tg.setenv("GOCACHE", filepath.Join(tg.tempdir, "c1"))
- tg.run("test", "-cover", "strings")
- tg.run("test", "-cover", "math", "strings")
+ tg.setenv("GOCACHE", tg.path("c1"))
+ tg.run("test", "-cover", "-short", "strings")
+ tg.run("test", "-cover", "-short", "math", "strings")
+ }
+
+ // TestCacheVet verifies that a second "go vet" of a cgo-using package
+ // (os/user) is served from the build cache: the -x trace of the second
+ // run must show neither a C compiler invocation nor a cgo invocation.
+ func TestCacheVet(t *testing.T) {
+ tg := testgo(t)
+ defer tg.cleanup()
+ tg.parallel()
+
+ if strings.Contains(os.Getenv("GODEBUG"), "gocacheverify") {
+ t.Skip("GODEBUG gocacheverify")
+ }
+ if os.Getenv("GOCACHE") == "off" {
+ // Populating a fresh cache from scratch is expensive,
+ // so only do it when not in -short mode.
+ tooSlow(t)
+ tg.makeTempdir()
+ tg.setenv("GOCACHE", tg.path("cache"))
+ }
+
+ // Check that second vet reuses cgo-derived inputs.
+ // The first command could be build instead of vet,
+ // except that if the cache is empty and there's a net.a
+ // in GOROOT/pkg, the build will not bother to regenerate
+ // and cache the cgo outputs, whereas vet always will.
+ tg.run("vet", "os/user")
+ tg.run("vet", "-x", "os/user")
+ tg.grepStderrNot(`^(clang|gcc)`, "should not have run compiler")
+ tg.grepStderrNot(`[\\/]cgo `, "should not have run cgo")
+ }
func TestIssue22588(t *testing.T) {
}
func TestIssue22531(t *testing.T) {
+ tooSlow(t)
if strings.Contains(os.Getenv("GODEBUG"), "gocacheverify") {
t.Skip("GODEBUG gocacheverify")
}
tg.parallel()
tg.makeTempdir()
tg.setenv("GOPATH", tg.tempdir)
- tg.setenv("GOCACHE", filepath.Join(tg.tempdir, "cache"))
+ tg.setenv("GOCACHE", tg.path("cache"))
tg.tempFile("src/m/main.go", "package main /* c1 */; func main() {}\n")
tg.run("install", "-x", "m")
tg.run("list", "-f", "{{.Stale}}", "m")
tg.grepStdout("false", "reported m as stale after install")
- tg.run("tool", "buildid", filepath.Join(tg.tempdir, "bin/m"+exeSuffix))
+ tg.run("tool", "buildid", tg.path("bin/m"+exeSuffix))
// The link action ID did not include the full main build ID,
// even though the full main build ID is written into the
tg.run("install", "-x", "m")
tg.run("list", "-f", "{{.Stale}}", "m")
tg.grepStdout("false", "reported m as stale after reinstall")
- tg.run("tool", "buildid", filepath.Join(tg.tempdir, "bin/m"+exeSuffix))
+ tg.run("tool", "buildid", tg.path("bin/m"+exeSuffix))
}
func TestIssue22596(t *testing.T) {
+ tooSlow(t)
if strings.Contains(os.Getenv("GODEBUG"), "gocacheverify") {
t.Skip("GODEBUG gocacheverify")
}
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
- tg.setenv("GOCACHE", filepath.Join(tg.tempdir, "cache"))
+ tg.setenv("GOCACHE", tg.path("cache"))
tg.tempFile("gopath1/src/p/p.go", "package p; func F(){}\n")
tg.tempFile("gopath2/src/p/p.go", "package p; func F(){}\n")
- tg.setenv("GOPATH", filepath.Join(tg.tempdir, "gopath1"))
+ tg.setenv("GOPATH", tg.path("gopath1"))
tg.run("list", "-f={{.Target}}", "p")
target1 := strings.TrimSpace(tg.getStdout())
tg.run("install", "p")
tg.wantNotStale("p", "", "p stale after install")
- tg.setenv("GOPATH", filepath.Join(tg.tempdir, "gopath2"))
+ tg.setenv("GOPATH", tg.path("gopath2"))
tg.run("list", "-f={{.Target}}", "p")
target2 := strings.TrimSpace(tg.getStdout())
tg.must(os.MkdirAll(filepath.Dir(target2), 0777))
}
func TestTestCache(t *testing.T) {
+ tooSlow(t)
+
if strings.Contains(os.Getenv("GODEBUG"), "gocacheverify") {
t.Skip("GODEBUG gocacheverify")
}
tg.parallel()
tg.makeTempdir()
tg.setenv("GOPATH", tg.tempdir)
- tg.setenv("GOCACHE", filepath.Join(tg.tempdir, "cache"))
+ tg.setenv("GOCACHE", tg.path("cache"))
- // timeout here should not affect result being cached
- // or being retrieved later.
- tg.run("test", "-x", "-timeout=10s", "errors")
- tg.grepStderr(`[\\/]compile|gccgo`, "did not run compiler")
- tg.grepStderr(`[\\/]link|gccgo`, "did not run linker")
- tg.grepStderr(`errors\.test`, "did not run test")
+ if runtime.Compiler != "gccgo" {
+ // timeout here should not affect result being cached
+ // or being retrieved later.
+ tg.run("test", "-x", "-timeout=10s", "errors")
+ tg.grepStderr(`[\\/]compile|gccgo`, "did not run compiler")
+ tg.grepStderr(`[\\/]link|gccgo`, "did not run linker")
+ tg.grepStderr(`errors\.test`, "did not run test")
- tg.run("test", "-x", "errors")
- tg.grepStdout(`ok \terrors\t\(cached\)`, "did not report cached result")
- tg.grepStderrNot(`[\\/]compile|gccgo`, "incorrectly ran compiler")
- tg.grepStderrNot(`[\\/]link|gccgo`, "incorrectly ran linker")
- tg.grepStderrNot(`errors\.test`, "incorrectly ran test")
- tg.grepStderrNot("DO NOT USE", "poisoned action status leaked")
+ tg.run("test", "-x", "errors")
+ tg.grepStdout(`ok \terrors\t\(cached\)`, "did not report cached result")
+ tg.grepStderrNot(`[\\/]compile|gccgo`, "incorrectly ran compiler")
+ tg.grepStderrNot(`[\\/]link|gccgo`, "incorrectly ran linker")
+ tg.grepStderrNot(`errors\.test`, "incorrectly ran test")
+ tg.grepStderrNot("DO NOT USE", "poisoned action status leaked")
- // Even very low timeouts do not disqualify cached entries.
- tg.run("test", "-timeout=1ns", "-x", "errors")
- tg.grepStderrNot(`errors\.test`, "incorrectly ran test")
+ // Even very low timeouts do not disqualify cached entries.
+ tg.run("test", "-timeout=1ns", "-x", "errors")
+ tg.grepStderrNot(`errors\.test`, "incorrectly ran test")
- tg.run("clean", "-testcache")
- tg.run("test", "-x", "errors")
- tg.grepStderr(`errors\.test`, "did not run test")
+ tg.run("clean", "-testcache")
+ tg.run("test", "-x", "errors")
+ tg.grepStderr(`errors\.test`, "did not run test")
+ }
// The -p=1 in the commands below just makes the -x output easier to read.
tg.grepStdout(`ok \tt/t2\t\(cached\)`, "did not cache t2")
tg.grepStdout(`ok \tt/t3\t\(cached\)`, "did not cache t3")
tg.grepStdout(`ok \tt/t4\t\(cached\)`, "did not cache t4")
- tg.grepStderrNot(`[\\/]compile|gccgo`, "incorrectly ran compiler")
- tg.grepStderrNot(`[\\/]link|gccgo`, "incorrectly ran linker")
+ tg.grepStderrNot(`[\\/](compile|gccgo) `, "incorrectly ran compiler")
+ tg.grepStderrNot(`[\\/](link|gccgo) `, "incorrectly ran linker")
tg.grepStderrNot(`p[0-9]\.test`, "incorrectly ran test")
t.Log("\n\nCOMMENT\n\n")
tg.grepStdout(`ok \tt/t2\t\(cached\)`, "did not cache t2")
tg.grepStdout(`ok \tt/t3\t\(cached\)`, "did not cache t3")
tg.grepStdout(`ok \tt/t4\t\(cached\)`, "did not cache t4")
- tg.grepStderrNot(`([\\/]compile|gccgo).*t[0-9]_test\.go`, "incorrectly ran compiler")
- tg.grepStderrNot(`[\\/]link|gccgo`, "incorrectly ran linker")
+ tg.grepStderrNot(`([\\/](compile|gccgo) ).*t[0-9]_test\.go`, "incorrectly ran compiler")
+ tg.grepStderrNot(`[\\/](link|gccgo) `, "incorrectly ran linker")
tg.grepStderrNot(`t[0-9]\.test.*test\.short`, "incorrectly ran test")
t.Log("\n\nCHANGE\n\n")
// so the test should not have been rerun.
tg.grepStderr(`([\\/]compile|gccgo).*t2_test.go`, "did not recompile t2")
tg.grepStderr(`([\\/]link|gccgo).*t2\.test`, "did not relink t2_test")
- tg.grepStdout(`ok \tt/t2\t\(cached\)`, "did not cache t/t2")
+ // This check does not currently work with gccgo, as garbage
+ // collection of unused variables is not turned on by default.
+ if runtime.Compiler != "gccgo" {
+ tg.grepStdout(`ok \tt/t2\t\(cached\)`, "did not cache t/t2")
+ }
// t3 imports p1, and changing X changes t3's test binary.
tg.grepStderr(`([\\/]compile|gccgo).*t3_test.go`, "did not recompile t3")
// and not rerun.
tg.grepStderrNot(`([\\/]compile|gccgo).*t4_test.go`, "incorrectly recompiled t4")
tg.grepStderr(`([\\/]link|gccgo).*t4\.test`, "did not relink t4_test")
- tg.grepStdout(`ok \tt/t4\t\(cached\)`, "did not cache t/t4")
+ // This check does not currently work with gccgo, as garbage
+ // collection of unused variables is not turned on by default.
+ if runtime.Compiler != "gccgo" {
+ tg.grepStdout(`ok \tt/t4\t\(cached\)`, "did not cache t/t4")
+ }
+ }
+
+ // TestTestCacheInputs checks that cached "go test" results are invalidated
+ // when inputs the test observed change: environment variables, file size,
+ // file content, directory listings, externally referenced files, and
+ // executed scripts. Each sub-scenario runs a test twice to establish a
+ // cached result, perturbs one input, and checks that "(cached)" disappears
+ // for exactly one run before being re-established.
+ func TestTestCacheInputs(t *testing.T) {
+ tooSlow(t)
+
+ if strings.Contains(os.Getenv("GODEBUG"), "gocacheverify") {
+ t.Skip("GODEBUG gocacheverify")
+ }
+ tg := testgo(t)
+ defer tg.cleanup()
+ tg.parallel()
+ tg.makeTempdir()
+ tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
+ tg.setenv("GOCACHE", tg.path("cache"))
+
+ // file.txt and script.sh are created inside the checked-in testdata
+ // tree, so remove them when done.
+ defer os.Remove(filepath.Join(tg.pwd(), "testdata/src/testcache/file.txt"))
+ defer os.Remove(filepath.Join(tg.pwd(), "testdata/src/testcache/script.sh"))
+ tg.must(ioutil.WriteFile(filepath.Join(tg.pwd(), "testdata/src/testcache/file.txt"), []byte("x"), 0644))
+ // Backdate the files so that later rewrites have a visibly newer mtime.
+ old := time.Now().Add(-1 * time.Minute)
+ tg.must(os.Chtimes(filepath.Join(tg.pwd(), "testdata/src/testcache/file.txt"), old, old))
+ info, err := os.Stat(filepath.Join(tg.pwd(), "testdata/src/testcache/file.txt"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ t.Logf("file.txt: old=%v, info.ModTime=%v", old, info.ModTime()) // help debug when Chtimes lies about succeeding
+ tg.setenv("TESTKEY", "x")
+
+ tg.must(ioutil.WriteFile(filepath.Join(tg.pwd(), "testdata/src/testcache/script.sh"), []byte("#!/bin/sh\nexit 0\n"), 0755))
+ tg.must(os.Chtimes(filepath.Join(tg.pwd(), "testdata/src/testcache/script.sh"), old, old))
+
+ tg.run("test", "testcache")
+ tg.run("test", "testcache")
+ tg.grepStdout(`\(cached\)`, "did not cache")
+
+ // Changing an env var the test reads must invalidate the cache once.
+ tg.setenv("TESTKEY", "y")
+ tg.run("test", "testcache")
+ tg.grepStdoutNot(`\(cached\)`, "did not notice env var change")
+ tg.run("test", "testcache")
+ tg.grepStdout(`\(cached\)`, "did not cache")
+
+ // Changing the size of a file the test stats must invalidate the cache.
+ tg.run("test", "testcache", "-run=FileSize")
+ tg.run("test", "testcache", "-run=FileSize")
+ tg.grepStdout(`\(cached\)`, "did not cache")
+ tg.must(ioutil.WriteFile(filepath.Join(tg.pwd(), "testdata/src/testcache/file.txt"), []byte("xxx"), 0644))
+ tg.run("test", "testcache", "-run=FileSize")
+ tg.grepStdoutNot(`\(cached\)`, "did not notice file size change")
+ tg.run("test", "testcache", "-run=FileSize")
+ tg.grepStdout(`\(cached\)`, "did not cache")
+
+ // Same invalidation must apply when the file is reached via os.Chdir.
+ tg.run("test", "testcache", "-run=Chdir")
+ tg.run("test", "testcache", "-run=Chdir")
+ tg.grepStdout(`\(cached\)`, "did not cache")
+ tg.must(ioutil.WriteFile(filepath.Join(tg.pwd(), "testdata/src/testcache/file.txt"), []byte("xxxxx"), 0644))
+ tg.run("test", "testcache", "-run=Chdir")
+ tg.grepStdoutNot(`\(cached\)`, "did not notice file size change")
+ tg.run("test", "testcache", "-run=Chdir")
+ tg.grepStdout(`\(cached\)`, "did not cache")
+
+ // Changing file content (size kept same length, mtime bumped) must
+ // invalidate a test that reads the content.
+ tg.must(os.Chtimes(filepath.Join(tg.pwd(), "testdata/src/testcache/file.txt"), old, old))
+ tg.run("test", "testcache", "-run=FileContent")
+ tg.run("test", "testcache", "-run=FileContent")
+ tg.grepStdout(`\(cached\)`, "did not cache")
+ tg.must(ioutil.WriteFile(filepath.Join(tg.pwd(), "testdata/src/testcache/file.txt"), []byte("yyy"), 0644))
+ old2 := old.Add(10 * time.Second)
+ tg.must(os.Chtimes(filepath.Join(tg.pwd(), "testdata/src/testcache/file.txt"), old2, old2))
+ tg.run("test", "testcache", "-run=FileContent")
+ tg.grepStdoutNot(`\(cached\)`, "did not notice file content change")
+ tg.run("test", "testcache", "-run=FileContent")
+ tg.grepStdout(`\(cached\)`, "did not cache")
+
+ // Removing a file must invalidate a test that lists the directory.
+ tg.run("test", "testcache", "-run=DirList")
+ tg.run("test", "testcache", "-run=DirList")
+ tg.grepStdout(`\(cached\)`, "did not cache")
+ tg.must(os.Remove(filepath.Join(tg.pwd(), "testdata/src/testcache/file.txt")))
+ tg.run("test", "testcache", "-run=DirList")
+ tg.grepStdoutNot(`\(cached\)`, "did not notice directory change")
+ tg.run("test", "testcache", "-run=DirList")
+ tg.grepStdout(`\(cached\)`, "did not cache")
+
+ // A file outside GOPATH that the test merely Opens/Stats must NOT be
+ // treated as a cache input: removing it keeps the result cached.
+ tg.tempFile("file.txt", "")
+ tg.must(ioutil.WriteFile(filepath.Join(tg.pwd(), "testdata/src/testcache/testcachetmp_test.go"), []byte(`package testcache
+
+ import (
+ "os"
+ "testing"
+ )
+
+ func TestExternalFile(t *testing.T) {
+ os.Open(`+fmt.Sprintf("%q", tg.path("file.txt"))+`)
+ _, err := os.Stat(`+fmt.Sprintf("%q", tg.path("file.txt"))+`)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+ `), 0666))
+ defer os.Remove(filepath.Join(tg.pwd(), "testdata/src/testcache/testcachetmp_test.go"))
+ tg.run("test", "testcache", "-run=ExternalFile")
+ tg.run("test", "testcache", "-run=ExternalFile")
+ tg.grepStdout(`\(cached\)`, "did not cache")
+ tg.must(os.Remove(filepath.Join(tg.tempdir, "file.txt")))
+ tg.run("test", "testcache", "-run=ExternalFile")
+ tg.grepStdout(`\(cached\)`, "did not cache")
+
+ // Touching an executed script must invalidate the cached result.
+ switch runtime.GOOS {
+ case "nacl", "plan9", "windows":
+ // no shell scripts
+ default:
+ tg.run("test", "testcache", "-run=Exec")
+ tg.run("test", "testcache", "-run=Exec")
+ tg.grepStdout(`\(cached\)`, "did not cache")
+ tg.must(os.Chtimes(filepath.Join(tg.pwd(), "testdata/src/testcache/script.sh"), old2, old2))
+ tg.run("test", "testcache", "-run=Exec")
+ tg.grepStdoutNot(`\(cached\)`, "did not notice script change")
+ tg.run("test", "testcache", "-run=Exec")
+ tg.grepStdout(`\(cached\)`, "did not cache")
+ }
+ }
+
+ // TestNoCache checks that when no GOCACHE is set and the default cache
+ // location (under $HOME) is unwritable, the go command still builds
+ // successfully and prints a "disabling cache" warning instead of failing.
+ func TestNoCache(t *testing.T) {
+ switch runtime.GOOS {
+ case "windows":
+ t.Skipf("no unwritable directories on %s", runtime.GOOS)
+ }
+ if os.Getuid() == 0 {
+ // root can write anywhere, so the unwritable-dir setup is moot.
+ t.Skip("skipping test because running as root")
+ }
+
+ tg := testgo(t)
+ defer tg.cleanup()
+ tg.parallel()
+ tg.tempFile("triv.go", `package main; func main() {}`)
+ // Make the home directory's parent read-only so the default cache
+ // directory cannot be created.
+ tg.must(os.MkdirAll(tg.path("unwritable"), 0555))
+ home := "HOME"
+ if runtime.GOOS == "plan9" {
+ home = "home"
+ }
+ tg.setenv(home, tg.path(filepath.Join("unwritable", "home")))
+ tg.unsetenv("GOCACHE")
+ tg.run("build", "-o", tg.path("triv"), tg.path("triv.go"))
+ tg.grepStderr("disabling cache", "did not disable cache")
}
func TestTestVet(t *testing.T) {
+ tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
}
`)
- tg.runFail("test", filepath.Join(tg.tempdir, "p1_test.go"))
+ tg.runFail("test", tg.path("p1_test.go"))
tg.grepStderr(`Logf format %d`, "did not diagnose bad Logf")
- tg.run("test", "-vet=off", filepath.Join(tg.tempdir, "p1_test.go"))
+ tg.run("test", "-vet=off", tg.path("p1_test.go"))
tg.grepStdout(`^ok`, "did not print test summary")
tg.tempFile("p1.go", `
fmt.Printf("%d") // oops
}
`)
- tg.runFail("test", filepath.Join(tg.tempdir, "p1.go"))
+ tg.runFail("test", tg.path("p1.go"))
tg.grepStderr(`Printf format %d`, "did not diagnose bad Printf")
- tg.run("test", "-x", "-vet=shift", filepath.Join(tg.tempdir, "p1.go"))
+ tg.run("test", "-x", "-vet=shift", tg.path("p1.go"))
tg.grepStderr(`[\\/]vet.*-shift`, "did not run vet with -shift")
tg.grepStdout(`\[no test files\]`, "did not print test summary")
- tg.run("test", "-vet=off", filepath.Join(tg.tempdir, "p1.go"))
+ tg.run("test", "-vet=off", tg.path("p1.go"))
tg.grepStdout(`\[no test files\]`, "did not print test summary")
tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
tg.run("test", "vetcycle") // must not fail; #22890
+
+ tg.runFail("test", "vetfail/...")
+ tg.grepStderr(`Printf format %d`, "did not diagnose bad Printf")
+ tg.grepStdout(`ok\s+vetfail/p2`, "did not run vetfail/p2")
+ }
+
+ // TestTestVetRebuild (golang.org/issue/23701) checks that "go test" and
+ // "go vet" of package b succeed when b's external test imports both an
+ // augmented b (method added in export_test.go) and package a, which
+ // itself imports the un-augmented b. The un-augmented b must not leak
+ // through a into b_test's view of b.
+ func TestTestVetRebuild(t *testing.T) {
+ tg := testgo(t)
+ defer tg.cleanup()
+ tg.parallel()
+
+ // golang.org/issue/23701.
+ // b_test imports b with augmented method from export_test.go.
+ // b_test also imports a, which imports b.
+ // Must not accidentally see un-augmented b propagate through a to b_test.
+ tg.tempFile("src/a/a.go", `package a
+ import "b"
+ type Type struct{}
+ func (*Type) M() b.T {return 0}
+ `)
+ tg.tempFile("src/b/b.go", `package b
+ type T int
+ type I interface {M() T}
+ `)
+ tg.tempFile("src/b/export_test.go", `package b
+ func (*T) Method() *T { return nil }
+ `)
+ tg.tempFile("src/b/b_test.go", `package b_test
+ import (
+ "testing"
+ "a"
+ . "b"
+ )
+ func TestBroken(t *testing.T) {
+ x := new(T)
+ x.Method()
+ _ = new(a.Type)
+ }
+ `)
+
+ tg.setenv("GOPATH", tg.path("."))
+ tg.run("test", "b")
+ tg.run("vet", "b")
}
func TestInstallDeps(t *testing.T) {
+ tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
}
func TestRelativePkgdir(t *testing.T) {
+ tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.makeTempdir()
}
func TestGcflagsPatterns(t *testing.T) {
+ skipIfGccgo(t, "gccgo has no standard packages")
tg := testgo(t)
defer tg.cleanup()
tg.setenv("GOPATH", "")
tg.setenv("GOCACHE", "off")
- tg.run("build", "-v", "-gcflags= \t\r\n -e", "fmt")
- tg.grepStderr("fmt", "did not rebuild fmt")
- tg.grepStderrNot("reflect", "incorrectly rebuilt reflect")
+ tg.run("build", "-n", "-v", "-gcflags= \t\r\n -e", "fmt")
+ tg.grepStderr("^# fmt", "did not rebuild fmt")
+ tg.grepStderrNot("^# reflect", "incorrectly rebuilt reflect")
- tg.run("build", "-v", "-gcflags=-e", "fmt", "reflect")
- tg.grepStderr("fmt", "did not rebuild fmt")
- tg.grepStderr("reflect", "did not rebuild reflect")
- tg.grepStderrNot("runtime", "incorrectly rebuilt runtime")
+ tg.run("build", "-n", "-v", "-gcflags=-e", "fmt", "reflect")
+ tg.grepStderr("^# fmt", "did not rebuild fmt")
+ tg.grepStderr("^# reflect", "did not rebuild reflect")
+ tg.grepStderrNot("^# runtime", "incorrectly rebuilt runtime")
- tg.run("build", "-x", "-v", "-gcflags= \t\r\n reflect \t\r\n = \t\r\n -N", "fmt")
- tg.grepStderr("fmt", "did not rebuild fmt")
- tg.grepStderr("reflect", "did not rebuild reflect")
+ tg.run("build", "-n", "-x", "-v", "-gcflags= \t\r\n reflect \t\r\n = \t\r\n -N", "fmt")
+ tg.grepStderr("^# fmt", "did not rebuild fmt")
+ tg.grepStderr("^# reflect", "did not rebuild reflect")
tg.grepStderr("compile.* -N .*-p reflect", "did not build reflect with -N flag")
tg.grepStderrNot("compile.* -N .*-p fmt", "incorrectly built fmt with -N flag")
- tg.run("test", "-c", "-n", "-gcflags=-N", "strings")
- tg.grepStderr("compile.* -N .*compare_test.go", "did not build strings_test package with -N flag")
+ tg.run("test", "-c", "-n", "-gcflags=-N", "-ldflags=-X=x.y=z", "strings")
+ tg.grepStderr("compile.* -N .*compare_test.go", "did not compile strings_test package with -N flag")
+ tg.grepStderr("link.* -X=x.y=z", "did not link strings.test binary with -X flag")
- tg.run("test", "-c", "-n", "-gcflags=strings=-N", "strings")
- tg.grepStderr("compile.* -N .*compare_test.go", "did not build strings_test package with -N flag")
+ tg.run("test", "-c", "-n", "-gcflags=strings=-N", "-ldflags=strings=-X=x.y=z", "strings")
+ tg.grepStderr("compile.* -N .*compare_test.go", "did not compile strings_test package with -N flag")
+ tg.grepStderr("link.* -X=x.y=z", "did not link strings.test binary with -X flag")
}
func TestGoTestMinusN(t *testing.T) {
}
func TestGoTestJSON(t *testing.T) {
+ skipIfGccgo(t, "gccgo does not have standard packages")
+ tooSlow(t)
+
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.grepStdout(`"Action":"output","Package":"skipper","Test":"Test","Output":"--- SKIP:`, "did not see SKIP output")
tg.grepStdout(`"Action":"skip","Package":"skipper","Test":"Test"`, "did not see skip result for Test")
+ tg.run("test", "-json", "-short", "-v", "errors")
+ tg.grepStdout(`"Action":"output","Package":"errors","Output":".*\(cached\)`, "did not see no cached output")
+
tg.run("test", "-json", "-bench=NONE", "-short", "-v", "errors")
tg.grepStdout(`"Package":"errors"`, "did not see JSON output")
tg.grepStdout(`"Action":"run"`, "did not see JSON output")
- tg.run("test", "-o", filepath.Join(tg.tempdir, "errors.test.exe"), "-c", "errors")
- tg.run("tool", "test2json", "-p", "errors", filepath.Join(tg.tempdir, "errors.test.exe"), "-test.v", "-test.short")
+ tg.run("test", "-o", tg.path("errors.test.exe"), "-c", "errors")
+ tg.run("tool", "test2json", "-p", "errors", tg.path("errors.test.exe"), "-test.v", "-test.short")
tg.grepStdout(`"Package":"errors"`, "did not see JSON output")
tg.grepStdout(`"Action":"run"`, "did not see JSON output")
tg.grepStdout(`\{"Action":"pass","Package":"errors"\}`, "did not see final pass")
}
func TestFailFast(t *testing.T) {
+ tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
// non-parallel subtests:
{"TestFailingSubtestsA", true, 1},
{"TestFailingSubtestsA", false, 2},
+ // fatal test
+ {"TestFatal[CD]", true, 1},
+ {"TestFatal[CD]", false, 2},
}
for _, tt := range tests {
})
}
}
+
+ // Issue 22986.
+ func TestImportPath(t *testing.T) {
+ tooSlow(t)
+ tg := testgo(t)
+ defer tg.cleanup()
+ tg.parallel()
+
+ tg.tempFile("src/a/a.go", `
+ package main
+
+ import (
+ "log"
+ p "a/p-1.0"
+ )
+
+ func main() {
+ if !p.V {
+ log.Fatal("false")
+ }
+ }`)
+
+ tg.tempFile("src/a/a_test.go", `
+ package main_test
+
+ import (
+ p "a/p-1.0"
+ "testing"
+ )
+
+ func TestV(t *testing.T) {
+ if !p.V {
+ t.Fatal("false")
+ }
+ }`)
+
+ tg.tempFile("src/a/p-1.0/p.go", `
+ package p
+
+ var V = true
+
+ func init() {}
+ `)
+
+ tg.setenv("GOPATH", tg.path("."))
+ tg.run("build", "-o", tg.path("a.exe"), "a")
+ tg.run("test", "a")
+ }
+
+ // Issue 23150.
+ func TestCpuprofileTwice(t *testing.T) {
+ tg := testgo(t)
+ defer tg.cleanup()
+ tg.parallel()
+ tg.tempFile("prof/src/x/x_test.go", `
+ package x_test
+ import (
+ "testing"
+ "time"
+ )
+ func TestSleep(t *testing.T) { time.Sleep(10 * time.Millisecond) }`)
+ tg.setenv("GOPATH", tg.path("prof"))
+ bin := tg.path("x.test")
+ out := tg.path("cpu.out")
+ tg.run("test", "-o="+bin, "-cpuprofile="+out, "x")
+ tg.must(os.Remove(out))
+ tg.run("test", "-o="+bin, "-cpuprofile="+out, "x")
+ tg.mustExist(out)
+ }
+
+ // Issue 23694.
+ func TestAtomicCoverpkgAll(t *testing.T) {
+ tg := testgo(t)
+ defer tg.cleanup()
+ tg.parallel()
+
+ tg.tempFile("src/x/x.go", `package x; import _ "sync/atomic"; func F() {}`)
+ tg.tempFile("src/x/x_test.go", `package x; import "testing"; func TestF(t *testing.T) { F() }`)
+ tg.setenv("GOPATH", tg.path("."))
+ tg.run("test", "-coverpkg=all", "-covermode=atomic", "x")
+ if canRace {
+ tg.run("test", "-coverpkg=all", "-race", "x")
+ }
+ }
+
+ // Issue 23882.
+ func TestCoverpkgAllRuntime(t *testing.T) {
+ tg := testgo(t)
+ defer tg.cleanup()
+ tg.parallel()
+
+ tg.tempFile("src/x/x.go", `package x; import _ "runtime"; func F() {}`)
+ tg.tempFile("src/x/x_test.go", `package x; import "testing"; func TestF(t *testing.T) { F() }`)
+ tg.setenv("GOPATH", tg.path("."))
+ tg.run("test", "-coverpkg=all", "x")
+ if canRace {
+ tg.run("test", "-coverpkg=all", "-race", "x")
+ }
+ }
+
+ func TestBadCommandLines(t *testing.T) {
+ tg := testgo(t)
+ defer tg.cleanup()
+
+ tg.tempFile("src/x/x.go", "package x\n")
+ tg.setenv("GOPATH", tg.path("."))
+
+ tg.run("build", "x")
+
+ tg.tempFile("src/x/@y.go", "package x\n")
+ tg.runFail("build", "x")
+ tg.grepStderr("invalid input file name \"@y.go\"", "did not reject @y.go")
+ tg.must(os.Remove(tg.path("src/x/@y.go")))
+
+ tg.tempFile("src/x/-y.go", "package x\n")
+ tg.runFail("build", "x")
+ tg.grepStderr("invalid input file name \"-y.go\"", "did not reject -y.go")
+ tg.must(os.Remove(tg.path("src/x/-y.go")))
+
+ if runtime.Compiler == "gccgo" {
+ tg.runFail("build", "-gccgoflags=all=@x", "x")
+ } else {
+ tg.runFail("build", "-gcflags=all=@x", "x")
+ }
+ tg.grepStderr("invalid command-line argument @x in command", "did not reject @x during exec")
+
+ tg.tempFile("src/@x/x.go", "package x\n")
+ tg.setenv("GOPATH", tg.path("."))
+ tg.runFail("build", "@x")
+ tg.grepStderr("invalid input directory name \"@x\"", "did not reject @x directory")
+
+ tg.tempFile("src/@x/y/y.go", "package y\n")
+ tg.setenv("GOPATH", tg.path("."))
+ tg.runFail("build", "@x/y")
+ tg.grepStderr("invalid import path \"@x/y\"", "did not reject @x/y import path")
+
+ tg.tempFile("src/-x/x.go", "package x\n")
+ tg.setenv("GOPATH", tg.path("."))
+ tg.runFail("build", "--", "-x")
+ tg.grepStderr("invalid input directory name \"-x\"", "did not reject -x directory")
+
+ tg.tempFile("src/-x/y/y.go", "package y\n")
+ tg.setenv("GOPATH", tg.path("."))
+ tg.runFail("build", "--", "-x/y")
+ tg.grepStderr("invalid import path \"-x/y\"", "did not reject -x/y import path")
+ }
+
+ func TestBadCgoDirectives(t *testing.T) {
+ if !canCgo {
+ t.Skip("no cgo")
+ }
+ tg := testgo(t)
+ defer tg.cleanup()
+
+ tg.tempFile("src/x/x.go", "package x\n")
+ tg.setenv("GOPATH", tg.path("."))
+
+ if runtime.Compiler == "gc" {
+ tg.tempFile("src/x/x.go", `package x
+
+ //go:cgo_ldflag "-fplugin=foo.so"
+
+ import "C"
+ `)
+ tg.runFail("build", "x")
+ tg.grepStderr("//go:cgo_ldflag .* only allowed in cgo-generated code", "did not reject //go:cgo_ldflag directive")
+ }
+
+ tg.must(os.Remove(tg.path("src/x/x.go")))
+ tg.runFail("build", "x")
+ tg.grepStderr("no Go files", "did not report missing source code")
+ tg.tempFile("src/x/_cgo_yy.go", `package x
+
+ //go:cgo_ldflag "-fplugin=foo.so"
+
+ import "C"
+ `)
+ tg.runFail("build", "x")
+ tg.grepStderr("no Go files", "did not report missing source code") // _* files are ignored...
+
+ if runtime.Compiler == "gc" {
+ tg.runFail("build", tg.path("src/x/_cgo_yy.go")) // ... but if forced, the comment is rejected
+ // Actually, today there is a separate issue that _ files named
+ // on the command-line are ignored. Once that is fixed,
+ // we want to see the cgo_ldflag error.
+ tg.grepStderr("//go:cgo_ldflag only allowed in cgo-generated code|no Go files", "did not reject //go:cgo_ldflag directive")
+ }
+
+ tg.must(os.Remove(tg.path("src/x/_cgo_yy.go")))
+
+ tg.tempFile("src/x/x.go", "package x\n")
+ tg.tempFile("src/x/y.go", `package x
+ // #cgo CFLAGS: -fplugin=foo.so
+ import "C"
+ `)
+ tg.runFail("build", "x")
+ tg.grepStderr("invalid flag in #cgo CFLAGS: -fplugin=foo.so", "did not reject -fplugin")
+
+ tg.tempFile("src/x/y.go", `package x
+ // #cgo CFLAGS: -Ibar -fplugin=foo.so
+ import "C"
+ `)
+ tg.runFail("build", "x")
+ tg.grepStderr("invalid flag in #cgo CFLAGS: -fplugin=foo.so", "did not reject -fplugin")
+
+ tg.tempFile("src/x/y.go", `package x
+ // #cgo pkg-config: -foo
+ import "C"
+ `)
+ tg.runFail("build", "x")
+ tg.grepStderr("invalid pkg-config package name: -foo", "did not reject pkg-config: -foo")
+
+ tg.tempFile("src/x/y.go", `package x
+ // #cgo pkg-config: @foo
+ import "C"
+ `)
+ tg.runFail("build", "x")
+ tg.grepStderr("invalid pkg-config package name: @foo", "did not reject pkg-config: -foo")
+
+ tg.tempFile("src/x/y.go", `package x
+ // #cgo CFLAGS: @foo
+ import "C"
+ `)
+ tg.runFail("build", "x")
+ tg.grepStderr("invalid flag in #cgo CFLAGS: @foo", "did not reject @foo flag")
+
+ tg.tempFile("src/x/y.go", `package x
+ // #cgo CFLAGS: -D
+ import "C"
+ `)
+ tg.runFail("build", "x")
+ tg.grepStderr("invalid flag in #cgo CFLAGS: -D without argument", "did not reject trailing -I flag")
+
+ // Note that -I @foo is allowed because we rewrite it into -I /path/to/src/@foo
+ // before the check is applied. There's no such rewrite for -D.
+
+ tg.tempFile("src/x/y.go", `package x
+ // #cgo CFLAGS: -D @foo
+ import "C"
+ `)
+ tg.runFail("build", "x")
+ tg.grepStderr("invalid flag in #cgo CFLAGS: -D @foo", "did not reject -D @foo flag")
+
+ tg.tempFile("src/x/y.go", `package x
+ // #cgo CFLAGS: -D@foo
+ import "C"
+ `)
+ tg.runFail("build", "x")
+ tg.grepStderr("invalid flag in #cgo CFLAGS: -D@foo", "did not reject -D@foo flag")
+
+ tg.setenv("CGO_CFLAGS", "-D@foo")
+ tg.tempFile("src/x/y.go", `package x
+ import "C"
+ `)
+ tg.run("build", "-n", "x")
+ tg.grepStderr("-D@foo", "did not find -D@foo in commands")
+ }
+
+ func TestTwoPkgConfigs(t *testing.T) {
+ if !canCgo {
+ t.Skip("no cgo")
+ }
+ if runtime.GOOS == "windows" || runtime.GOOS == "plan9" {
+ t.Skipf("no shell scripts on %s", runtime.GOOS)
+ }
+ tg := testgo(t)
+ defer tg.cleanup()
+ tg.parallel()
+ tg.tempFile("src/x/a.go", `package x
+ // #cgo pkg-config: --static a
+ import "C"
+ `)
+ tg.tempFile("src/x/b.go", `package x
+ // #cgo pkg-config: --static a
+ import "C"
+ `)
+ tg.tempFile("pkg-config.sh", `#!/bin/sh
+ echo $* >>`+tg.path("pkg-config.out"))
+ tg.must(os.Chmod(tg.path("pkg-config.sh"), 0755))
+ tg.setenv("GOPATH", tg.path("."))
+ tg.setenv("PKG_CONFIG", tg.path("pkg-config.sh"))
+ tg.run("build", "x")
+ out, err := ioutil.ReadFile(tg.path("pkg-config.out"))
+ tg.must(err)
+ out = bytes.TrimSpace(out)
+ want := "--cflags --static --static -- a a\n--libs --static --static -- a a"
+ if !bytes.Equal(out, []byte(want)) {
+ t.Errorf("got %q want %q", out, want)
+ }
+ }
+
+ // Issue 23982
+ func TestFilepathUnderCwdFormat(t *testing.T) {
+ tg := testgo(t)
+ defer tg.cleanup()
+ tg.run("test", "-x", "-cover", "log")
+ tg.grepStderrNot(`\.log\.cover\.go`, "-x output should contain correctly formatted filepath under cwd")
+ }
+
+ // Issue 24396.
+ func TestDontReportRemoveOfEmptyDir(t *testing.T) {
+ tg := testgo(t)
+ defer tg.cleanup()
+ tg.parallel()
+ tg.tempFile("src/a/a.go", `package a`)
+ tg.setenv("GOPATH", tg.path("."))
+ tg.run("install", "-x", "a")
+ tg.run("install", "-x", "a")
+ // The second install should have printed only a WORK= line,
+ // nothing else.
+ if bytes.Count(tg.stdout.Bytes(), []byte{'\n'})+bytes.Count(tg.stderr.Bytes(), []byte{'\n'}) > 1 {
+ t.Error("unnecessary output when installing installed package")
+ }
+ }
+
+ // Issue 23264.
+ func TestNoRelativeTmpdir(t *testing.T) {
+ tg := testgo(t)
+ defer tg.cleanup()
+
+ tg.tempFile("src/a/a.go", `package a`)
+ tg.cd(tg.path("."))
+ tg.must(os.Mkdir("tmp", 0777))
+
+ tg.setenv("GOCACHE", "off")
+ tg.setenv("GOPATH", tg.path("."))
+ tg.setenv("GOTMPDIR", "tmp")
+ tg.runFail("build", "a")
+ tg.grepStderr("relative tmpdir", "wrong error")
+
+ if runtime.GOOS != "windows" && runtime.GOOS != "plan9" {
+ tg.unsetenv("GOTMPDIR")
+ tg.setenv("TMPDIR", "tmp")
+ tg.runFail("build", "a")
+ tg.grepStderr("relative tmpdir", "wrong error")
+ }
+ }
"sort"
"strings"
"unicode"
+ "unicode/utf8"
"cmd/go/internal/base"
"cmd/go/internal/cfg"
Root string `json:",omitempty"` // Go root or Go path dir containing this package
ConflictDir string `json:",omitempty"` // Dir is hidden by this other directory
BinaryOnly bool `json:",omitempty"` // package cannot be recompiled
+ ForTest string `json:",omitempty"` // package is only for use in named test
+ DepOnly bool `json:",omitempty"` // package is only as a dependency, not explicitly listed
// Stale and StaleReason remain here *only* for the list command.
// They are only initialized in preparation for list execution.
StaleReason string `json:",omitempty"` // why is Stale true?
// Source files
+ // If you add to this list you MUST add to p.AllFiles (below) too.
+ // Otherwise file name security lists will not apply to any new additions.
GoFiles []string `json:",omitempty"` // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles)
CgoFiles []string `json:",omitempty"` // .go sources files that import "C"
IgnoredGoFiles []string `json:",omitempty"` // .go sources ignored due to build constraints
DepsErrors []*PackageError `json:",omitempty"` // errors loading dependencies
// Test information
+ // If you add to this list you MUST add to p.AllFiles (below) too.
+ // Otherwise file name security lists will not apply to any new additions.
TestGoFiles []string `json:",omitempty"` // _test.go files in package
TestImports []string `json:",omitempty"` // imports from TestGoFiles
XTestGoFiles []string `json:",omitempty"` // _test.go files outside package
XTestImports []string `json:",omitempty"` // imports from XTestGoFiles
}
+ // AllFiles returns the names of all the files considered for the package.
+ // This is used for sanity and security checks, so we include all files,
+ // even IgnoredGoFiles, because some subcommands consider them.
+ // The go/build package filtered others out (like foo_wrongGOARCH.s)
+ // and that's OK.
+ func (p *Package) AllFiles() []string {
+ return str.StringList(
+ p.GoFiles,
+ p.CgoFiles,
+ p.IgnoredGoFiles,
+ p.CFiles,
+ p.CXXFiles,
+ p.MFiles,
+ p.HFiles,
+ p.FFiles,
+ p.SFiles,
+ p.SwigFiles,
+ p.SwigCXXFiles,
+ p.SysoFiles,
+ p.TestGoFiles,
+ p.XTestGoFiles,
+ )
+ }
+
+ // Desc returns the package "description", for use in b.showOutput.
+ func (p *Package) Desc() string {
+ if p.ForTest != "" {
+ return p.ImportPath + " [" + p.ForTest + ".test]"
+ }
+ return p.ImportPath
+ }
+
type PackageInternal struct {
// Unexported fields are not part of the public API.
Build *build.Package
CoverVars map[string]*CoverVar // variables created by coverage analysis
OmitDebug bool // tell linker not to write debug information
GobinSubdir bool // install target would be subdir of GOBIN
+ TestmainGo *[]byte // content for _testmain.go
Asmflags []string // -asmflags for this package
Gcflags []string // -gcflags for this package
p.SwigFiles = pp.SwigFiles
p.SwigCXXFiles = pp.SwigCXXFiles
p.SysoFiles = pp.SysoFiles
+ if cfg.BuildMSan {
+ // There's no way for .syso files to be built both with and without
+ // support for memory sanitizer. Assume they are built without,
+ // and drop them.
+ p.SysoFiles = nil
+ }
p.CgoCFLAGS = pp.CgoCFLAGS
p.CgoCPPFLAGS = pp.CgoCPPFLAGS
p.CgoCXXFLAGS = pp.CgoCXXFLAGS
importPath := path
origPath := path
isLocal := build.IsLocalImport(path)
- var debugDeprecatedImportcfgDir string
if isLocal {
importPath = dirToImportPath(filepath.Join(srcDir, path))
- } else if DebugDeprecatedImportcfg.enabled {
- if d, i := DebugDeprecatedImportcfg.lookup(parent, path); d != "" {
- debugDeprecatedImportcfgDir = d
- importPath = i
- }
} else if mode&UseVendor != 0 {
// We do our own vendor resolution, because we want to
// find out the key to use in packageCache without the
// Load package.
// Import always returns bp != nil, even if an error occurs,
// in order to return partial information.
- var bp *build.Package
- var err error
- if debugDeprecatedImportcfgDir != "" {
- bp, err = cfg.BuildContext.ImportDir(debugDeprecatedImportcfgDir, 0)
- } else {
- buildMode := build.ImportComment
- if mode&UseVendor == 0 || path != origPath {
- // Not vendoring, or we already found the vendored path.
- buildMode |= build.IgnoreVendor
- }
- bp, err = cfg.BuildContext.Import(path, srcDir, buildMode)
+ buildMode := build.ImportComment
+ if mode&UseVendor == 0 || path != origPath {
+ // Not vendoring, or we already found the vendored path.
+ buildMode |= build.IgnoreVendor
}
+ bp, err := cfg.BuildContext.Import(path, srcDir, buildMode)
bp.ImportPath = importPath
if cfg.GOBIN != "" {
bp.BinDir = cfg.GOBIN
}
- if debugDeprecatedImportcfgDir == "" && err == nil && !isLocal && bp.ImportComment != "" && bp.ImportComment != path &&
+ if err == nil && !isLocal && bp.ImportComment != "" && bp.ImportComment != path &&
!strings.Contains(path, "/vendor/") && !strings.HasPrefix(path, "vendor/") {
err = fmt.Errorf("code in directory %s expects import %q", bp.Dir, bp.ImportComment)
}
p = setErrorPos(p, importPos)
}
- if debugDeprecatedImportcfgDir == "" && origPath != cleanImport(origPath) {
+ if origPath != cleanImport(origPath) {
p.Error = &PackageError{
ImportStack: stk.Copy(),
Err: fmt.Sprintf("non-canonical import path: %q should be %q", origPath, pathpkg.Clean(origPath)),
dir := filepath.Clean(parent.Dir)
root := filepath.Join(parent.Root, "src")
- if !hasFilePathPrefix(dir, root) || parent.ImportPath != "command-line-arguments" && filepath.Join(root, parent.ImportPath) != dir {
+ if !str.HasFilePathPrefix(dir, root) || parent.ImportPath != "command-line-arguments" && filepath.Join(root, parent.ImportPath) != dir {
// Look for symlinks before reporting error.
dir = expandPath(dir)
root = expandPath(root)
}
- if !hasFilePathPrefix(dir, root) || len(dir) <= len(root) || dir[len(root)] != filepath.Separator || parent.ImportPath != "command-line-arguments" && !parent.Internal.Local && filepath.Join(root, parent.ImportPath) != dir {
+ if !str.HasFilePathPrefix(dir, root) || len(dir) <= len(root) || dir[len(root)] != filepath.Separator || parent.ImportPath != "command-line-arguments" && !parent.Internal.Local && filepath.Join(root, parent.ImportPath) != dir {
base.Fatalf("unexpected directory layout:\n"+
" import path: %s\n"+
" root: %s\n"+
i-- // rewind over slash in ".../internal"
}
parent := p.Dir[:i+len(p.Dir)-len(p.ImportPath)]
- if hasFilePathPrefix(filepath.Clean(srcDir), filepath.Clean(parent)) {
+ if str.HasFilePathPrefix(filepath.Clean(srcDir), filepath.Clean(parent)) {
return p
}
// Look for symlinks before reporting error.
srcDir = expandPath(srcDir)
parent = expandPath(parent)
- if hasFilePathPrefix(filepath.Clean(srcDir), filepath.Clean(parent)) {
+ if str.HasFilePathPrefix(filepath.Clean(srcDir), filepath.Clean(parent)) {
return p
}
return p
}
parent := p.Dir[:truncateTo]
- if hasFilePathPrefix(filepath.Clean(srcDir), filepath.Clean(parent)) {
+ if str.HasFilePathPrefix(filepath.Clean(srcDir), filepath.Clean(parent)) {
return p
}
// Look for symlinks before reporting error.
srcDir = expandPath(srcDir)
parent = expandPath(parent)
- if hasFilePathPrefix(filepath.Clean(srcDir), filepath.Clean(parent)) {
+ if str.HasFilePathPrefix(filepath.Clean(srcDir), filepath.Clean(parent)) {
return p
}
if InstallTargetDir(p) == ToTool {
// This is for 'go tool'.
// Override all the usual logic and force it into the tool directory.
- p.Target = filepath.Join(cfg.GOROOTpkg, "tool", full)
+ if cfg.BuildToolchainName == "gccgo" {
+ p.Target = filepath.Join(base.ToolDir, elem)
+ } else {
+ p.Target = filepath.Join(cfg.GOROOTpkg, "tool", full)
+ }
}
if p.Target != "" && cfg.BuildContext.GOOS == "windows" {
p.Target += ".exe"
// To avoid problems on case-insensitive files, we reject any package
// where two different input files have equal names under a case-insensitive
// comparison.
- f1, f2 := str.FoldDup(str.StringList(
- p.GoFiles,
- p.CgoFiles,
- p.IgnoredGoFiles,
- p.CFiles,
- p.CXXFiles,
- p.MFiles,
- p.HFiles,
- p.FFiles,
- p.SFiles,
- p.SysoFiles,
- p.SwigFiles,
- p.SwigCXXFiles,
- p.TestGoFiles,
- p.XTestGoFiles,
- ))
+ inputs := p.AllFiles()
+ f1, f2 := str.FoldDup(inputs)
if f1 != "" {
p.Error = &PackageError{
ImportStack: stk.Copy(),
return
}
+ // If first letter of input file is ASCII, it must be alphanumeric.
+ // This avoids files turning into flags when invoking commands,
+ // and other problems we haven't thought of yet.
+ // Also, _cgo_ files must be generated by us, not supplied.
+ // They are allowed to have //go:cgo_ldflag directives.
+ // The directory scan ignores files beginning with _,
+ // so we shouldn't see any _cgo_ files anyway, but just be safe.
+ for _, file := range inputs {
+ if !SafeArg(file) || strings.HasPrefix(file, "_cgo_") {
+ p.Error = &PackageError{
+ ImportStack: stk.Copy(),
+ Err: fmt.Sprintf("invalid input file name %q", file),
+ }
+ return
+ }
+ }
+ if name := pathpkg.Base(p.ImportPath); !SafeArg(name) {
+ p.Error = &PackageError{
+ ImportStack: stk.Copy(),
+ Err: fmt.Sprintf("invalid input directory name %q", name),
+ }
+ return
+ }
+ if !SafeArg(p.ImportPath) {
+ p.Error = &PackageError{
+ ImportStack: stk.Copy(),
+ Err: fmt.Sprintf("invalid import path %q", p.ImportPath),
+ }
+ return
+ }
+
// Build list of imported packages and full dependency list.
imports := make([]*Package, 0, len(p.Imports))
for i, path := range importPaths {
}
}
+ // SafeArg reports whether arg is a "safe" command-line argument,
+ // meaning that when it appears in a command-line, it probably
+ // doesn't have some special meaning other than its own name.
+ // Obviously args beginning with - are not safe (they look like flags).
+ // Less obviously, args beginning with @ are not safe (they look like
+ // GNU binutils flagfile specifiers, sometimes called "response files").
+ // To be conservative, we reject almost any arg beginning with non-alphanumeric ASCII.
+ // We accept leading . _ and / as likely in file system paths.
+ // There is a copy of this function in cmd/compile/internal/gc/noder.go.
+ func SafeArg(name string) bool {
+ if name == "" {
+ return false
+ }
+ c := name[0]
+ return '0' <= c && c <= '9' || 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || c == '.' || c == '_' || c == '/' || c >= utf8.RuneSelf
+ }
+
// LinkerDeps returns the list of linker-induced dependencies for main package p.
func LinkerDeps(p *Package) []string {
// Everything links runtime.
// InternalGoFiles returns the list of Go files being built for the package,
// using absolute paths.
func (p *Package) InternalGoFiles() []string {
- return p.mkAbs(str.StringList(p.GoFiles, p.CgoFiles, p.TestGoFiles, p.XTestGoFiles))
+ return p.mkAbs(str.StringList(p.GoFiles, p.CgoFiles, p.TestGoFiles))
+ }
+
+ // InternalXGoFiles returns the list of Go files being built for the XTest package,
+ // using absolute paths.
+ func (p *Package) InternalXGoFiles() []string {
+ return p.mkAbs(p.XTestGoFiles)
}
// InternalGoFiles returns the list of all Go files possibly relevant for the package,
for _, pkg := range pkgs {
if pkg.Error != nil {
base.Errorf("can't load package: %s", pkg.Error)
+ printed[pkg.Error] = true
}
for _, err := range pkg.DepsErrors {
// Since these are errors in dependencies,
// symbol in an executable, which is typical when internally
// linking PIE binaries.
TLSIEtoLE func(s *sym.Symbol, off, size int)
+
+ // optional override for assignAddress
+ AssignAddress func(ctxt *Link, sect *sym.Section, n int, s *sym.Symbol, va uint64, isTramp bool) (*sym.Section, int, uint64)
}
var (
- Thearch Arch
+ thearch Arch
Lcsize int32
rpath Rpath
Spsize int32
Segdwarf sym.Segment
)
- /* whence for ldpkg */
- const (
- FileObj = 0 + iota
- ArchiveObj
- Pkgdef
- )
-
const pkgdef = "__.PKGDEF"
var (
}
func libinit(ctxt *Link) {
- Funcalign = Thearch.Funcalign
+ Funcalign = thearch.Funcalign
// add goroot to the end of the libdir list.
suffix := ""
return nil
}
- for i := 0; i < len(ctxt.Libdir); i++ {
+ for _, libdir := range ctxt.Libdir {
if ctxt.linkShared {
- shlibname := filepath.Join(ctxt.Libdir[i], name+".shlibname")
+ shlibname := filepath.Join(libdir, name+".shlibname")
if ctxt.Debugvlog != 0 {
ctxt.Logf("searching for %s.a in %s\n", name, shlibname)
}
return addlibpath(ctxt, "internal", "internal", "", name, shlibname)
}
}
- pname := filepath.Join(ctxt.Libdir[i], name+".a")
+ pname := filepath.Join(libdir, name+".a")
if ctxt.Debugvlog != 0 {
ctxt.Logf("searching for %s.a in %s\n", name, pname)
}
toc.Type = sym.SDYNIMPORT
}
- if ctxt.LinkMode == LinkExternal && !iscgo && ctxt.LibraryByPkg["runtime/cgo"] == nil {
+ if ctxt.LinkMode == LinkExternal && !iscgo && ctxt.LibraryByPkg["runtime/cgo"] == nil && !(objabi.GOOS == "darwin" && (ctxt.Arch.Family == sys.AMD64 || ctxt.Arch.Family == sys.I386)) {
// This indicates a user requested -linkmode=external.
// The startup code uses an import of runtime/cgo to decide
// whether to initialize the TLS. So give it one. This could
// recording the value of GOARM.
if ctxt.Arch.Family == sys.ARM {
s := ctxt.Syms.Lookup("runtime.goarm", 0)
- s.Type = sym.SRODATA
+ s.Type = sym.SDATA
s.Size = 0
s.AddUint8(uint8(objabi.GOARM))
}
if objabi.Framepointer_enabled(objabi.GOOS, objabi.GOARCH) {
s := ctxt.Syms.Lookup("runtime.framepointer_enabled", 0)
- s.Type = sym.SRODATA
+ s.Type = sym.SDATA
s.Size = 0
s.AddUint8(1)
}
x = sym.AttrCgoExportStatic
}
w := 0
- for i := 0; i < len(dynexp); i++ {
+ for i := range dynexp {
if dynexp[i].Attr&x != 0 {
dynexp[w] = dynexp[i]
w++
}
// If package versioning is required, generate a hash of the
- // the packages used in the link.
+ // packages used in the link.
if ctxt.BuildMode == BuildModeShared || ctxt.BuildMode == BuildModePlugin || ctxt.CanUsePlugins() {
for _, lib := range ctxt.Library {
if lib.Shlib == "" {
Errorf(nil, "%s: short read on archive file symbol header", lib.File)
return
}
+ if arhdr.name != pkgdef {
+ Errorf(nil, "%s: missing package data entry", lib.File)
+ return
+ }
h := sha1.New()
if err != nil {
Exitf("cannot open file %s: %v", lib.File, err)
}
+ defer f.Close()
+ defer func() {
+ if pkg == "main" && !lib.Main {
+ Exitf("%s: not package main", lib.File)
+ }
+
+ // Ideally, we'd check that *all* object files within
+ // the archive were marked safe, but here we settle
+ // for *any*.
+ //
+ // Historically, cmd/link only checked the __.PKGDEF
+ // file, which in turn came from the first object
+ // file, typically produced by cmd/compile. The
+ // remaining object files are normally produced by
+ // cmd/asm, which doesn't support marking files as
+ // safe anyway. So at least in practice, this matches
+ // how safe mode has always worked.
+ if *flagU && !lib.Safe {
+ Exitf("%s: load of unsafe package %s", lib.File, pkg)
+ }
+ }()
for i := 0; i < len(ARMAG); i++ {
if c, err := f.ReadByte(); err == nil && c == ARMAG[i] {
/* load it as a regular file */
l := f.Seek(0, 2)
-
f.Seek(0, 0)
- ldobj(ctxt, f, lib, l, lib.File, lib.File, FileObj)
- f.Close()
-
+ ldobj(ctxt, f, lib, l, lib.File, lib.File)
return
}
- /* process __.PKGDEF */
- off := f.Offset()
-
- var arhdr ArHdr
- l := nextar(f, off, &arhdr)
- var pname string
- if l <= 0 {
- Errorf(nil, "%s: short read on archive file symbol header", lib.File)
- goto out
- }
-
- if !strings.HasPrefix(arhdr.name, pkgdef) {
- Errorf(nil, "%s: cannot find package header", lib.File)
- goto out
- }
-
- off += l
-
- ldpkg(ctxt, f, pkg, atolwhex(arhdr.size), lib.File, Pkgdef)
-
/*
* load all the object files from the archive now.
* this gives us sequential file access and keeps us
* loading every object will also make it possible to
* load foreign objects not referenced by __.PKGDEF.
*/
+ var arhdr ArHdr
+ off := f.Offset()
for {
- l = nextar(f, off, &arhdr)
+ l := nextar(f, off, &arhdr)
if l == 0 {
break
}
if l < 0 {
Exitf("%s: malformed archive", lib.File)
}
-
off += l
- pname = fmt.Sprintf("%s(%s)", lib.File, arhdr.name)
+ // __.PKGDEF isn't a real Go object file, and it's
+ // absent in -linkobj builds anyway. Skipping it
+ // ensures consistency between -linkobj and normal
+ // build modes.
+ if arhdr.name == pkgdef {
+ continue
+ }
+
+ pname := fmt.Sprintf("%s(%s)", lib.File, arhdr.name)
l = atolwhex(arhdr.size)
- ldobj(ctxt, f, lib, l, pname, lib.File, ArchiveObj)
+ ldobj(ctxt, f, lib, l, pname, lib.File)
}
-
- out:
- f.Close()
}
type Hostobj struct {
// These packages can use internal linking mode.
// Others trigger external mode.
var internalpkg = []string{
+ "crypto/internal/boring",
"crypto/x509",
"net",
"os/user",
func ldhostobj(ld func(*Link, *bio.Reader, string, int64, string), headType objabi.HeadType, f *bio.Reader, pkg string, length int64, pn string, file string) *Hostobj {
isinternal := false
- for i := 0; i < len(internalpkg); i++ {
- if pkg == internalpkg[i] {
+ for _, intpkg := range internalpkg {
+ if pkg == intpkg {
isinternal = true
break
}
// does not work, the resulting programs will not run. See
// issue #17847. To avoid this problem pass -no-pie to the
// toolchain if it is supported.
- if ctxt.BuildMode == BuildModeExe {
+ if ctxt.BuildMode == BuildModeExe && !ctxt.linkShared {
src := filepath.Join(*flagTmpdir, "trivial.c")
if err := ioutil.WriteFile(src, []byte("int main() { return 0; }"), 0666); err != nil {
Errorf(nil, "WriteFile trivial.c failed: %v", err)
}
if !*FlagS && !*FlagW && !debug_s && ctxt.HeadType == objabi.Hdarwin {
- // Skip combining dwarf on arm.
- if !ctxt.Arch.InFamily(sys.ARM, sys.ARM64) {
- dsym := filepath.Join(*flagTmpdir, "go.dwarf")
- if out, err := exec.Command("dsymutil", "-f", *flagOutfile, "-o", dsym).CombinedOutput(); err != nil {
- Exitf("%s: running dsymutil failed: %v\n%s", os.Args[0], err, out)
- }
- // Skip combining if `dsymutil` didn't generate a file. See #11994.
- if _, err := os.Stat(dsym); os.IsNotExist(err) {
- return
- }
- // For os.Rename to work reliably, must be in same directory as outfile.
- combinedOutput := *flagOutfile + "~"
- if err := machoCombineDwarf(*flagOutfile, dsym, combinedOutput, ctxt.BuildMode); err != nil {
- Exitf("%s: combining dwarf failed: %v", os.Args[0], err)
- }
+ dsym := filepath.Join(*flagTmpdir, "go.dwarf")
+ if out, err := exec.Command("dsymutil", "-f", *flagOutfile, "-o", dsym).CombinedOutput(); err != nil {
+ Exitf("%s: running dsymutil failed: %v\n%s", os.Args[0], err, out)
+ }
+ // Skip combining if `dsymutil` didn't generate a file. See #11994.
+ if _, err := os.Stat(dsym); os.IsNotExist(err) {
+ return
+ }
+ // For os.Rename to work reliably, must be in same directory as outfile.
+ combinedOutput := *flagOutfile + "~"
+ isIOS, err := machoCombineDwarf(*flagOutfile, dsym, combinedOutput, ctxt.BuildMode)
+ if err != nil {
+ Exitf("%s: combining dwarf failed: %v", os.Args[0], err)
+ }
+ if !isIOS {
os.Remove(*flagOutfile)
if err := os.Rename(combinedOutput, *flagOutfile); err != nil {
Exitf("%s: %v", os.Args[0], err)
// ldobj loads an input object. If it is a host object (an object
// compiled by a non-Go compiler) it returns the Hostobj pointer. If
// it is a Go object, it returns nil.
- func ldobj(ctxt *Link, f *bio.Reader, lib *sym.Library, length int64, pn string, file string, whence int) *Hostobj {
+ func ldobj(ctxt *Link, f *bio.Reader, lib *sym.Library, length int64, pn string, file string) *Hostobj {
pkg := objabi.PathToPrefix(lib.Pkg)
eof := f.Offset() + length
import1 := f.Offset()
f.Seek(import0, 0)
- ldpkg(ctxt, f, pkg, import1-import0-2, pn, whence) // -2 for !\n
+ ldpkg(ctxt, f, lib, import1-import0-2, pn) // -2 for !\n
f.Seek(import1, 0)
objfile.Load(ctxt.Arch, ctxt.Syms, f, lib, eof-f.Offset(), pn)
// should never be called directly.
// onlyctxt.Diagnose the direct caller.
// TODO(mwhudson): actually think about this.
+ // TODO(khr): disabled for now. Calls to external functions can only happen on the g0 stack.
+ // See the trampolines in src/runtime/sys_darwin_$ARCH.go.
if depth == 1 && s.Type != sym.SXREF && !ctxt.DynlinkingGo() &&
ctxt.BuildMode != BuildModeCArchive && ctxt.BuildMode != BuildModePIE && ctxt.BuildMode != BuildModeCShared && ctxt.BuildMode != BuildModePlugin {
-
- Errorf(s, "call to external function")
+ //Errorf(s, "call to external function")
}
return -1
}
func usage() {
fmt.Fprintf(os.Stderr, "usage: link [options] main.o\n")
- objabi.Flagprint(2)
+ objabi.Flagprint(os.Stderr)
Exit(2)
}
- func doversion() {
- Exitf("version %s", objabi.Version)
- }
-
type SymbolType int8
const (
// see also http://9p.io/magic/man2html/1/nm
TextSym SymbolType = 'T'
- DataSym = 'D'
- BSSSym = 'B'
- UndefinedSym = 'U'
- TLSSym = 't'
- FrameSym = 'm'
- ParamSym = 'p'
- AutoSym = 'a'
+ DataSym SymbolType = 'D'
+ BSSSym SymbolType = 'B'
+ UndefinedSym SymbolType = 'U'
+ TLSSym SymbolType = 't'
+ FrameSym SymbolType = 'm'
+ ParamSym SymbolType = 'p'
+ AutoSym SymbolType = 'a'
// Deleted auto (not a real sym, just placeholder for type)
DeletedAutoSym = 'x'
type markKind uint8 // for postorder traversal
const (
- unvisited markKind = iota
+ _ markKind = iota
visiting
visited
)
import (
"crypto"
+ "crypto/internal/boring"
"crypto/subtle"
"errors"
"io"
//
// WARNING: use of this function to encrypt plaintexts other than
// session keys is dangerous. Use RSA OAEP in new protocols.
-func EncryptPKCS1v15(rand io.Reader, pub *PublicKey, msg []byte) ([]byte, error) {
+func EncryptPKCS1v15(random io.Reader, pub *PublicKey, msg []byte) ([]byte, error) {
if err := checkPub(pub); err != nil {
return nil, err
}
- k := (pub.N.BitLen() + 7) / 8
+ k := pub.Size()
if len(msg) > k-11 {
return nil, ErrMessageTooLong
}
+ if boring.Enabled && random == boring.RandReader {
+ bkey, err := boringPublicKey(pub)
+ if err != nil {
+ return nil, err
+ }
+ return boring.EncryptRSAPKCS1(bkey, msg)
+ }
+ boring.UnreachableExceptTests()
+
// EM = 0x00 || 0x02 || PS || 0x00 || M
em := make([]byte, k)
em[1] = 2
ps, mm := em[2:len(em)-len(msg)-1], em[len(em)-len(msg):]
- err := nonZeroRandomBytes(ps, rand)
+ err := nonZeroRandomBytes(ps, random)
if err != nil {
return nil, err
}
em[len(em)-len(msg)-1] = 0
copy(mm, msg)
+ if boring.Enabled {
+ var bkey *boring.PublicKeyRSA
+ bkey, err = boringPublicKey(pub)
+ if err != nil {
+ return nil, err
+ }
+ return boring.EncryptRSANoPadding(bkey, em)
+ }
+
m := new(big.Int).SetBytes(em)
c := encrypt(new(big.Int), pub, m)
-
copyWithLeftPad(em, c.Bytes())
return em, nil
}
if err := checkPub(&priv.PublicKey); err != nil {
return nil, err
}
+
+ if boring.Enabled {
+ boringFakeRandomBlind(rand, priv)
+ bkey, err := boringPrivateKey(priv)
+ if err != nil {
+ return nil, err
+ }
+ out, err := boring.DecryptRSAPKCS1(bkey, ciphertext)
+ if err != nil {
+ return nil, ErrDecryption
+ }
+ return out, nil
+ }
+
valid, out, index, err := decryptPKCS1v15(rand, priv, ciphertext)
if err != nil {
return nil, err
if err := checkPub(&priv.PublicKey); err != nil {
return err
}
- k := (priv.N.BitLen() + 7) / 8
+ k := priv.Size()
if k-(len(key)+3+8) < 0 {
return ErrDecryption
}
// in order to maintain constant memory access patterns. If the plaintext was
// valid then index contains the index of the original message in em.
func decryptPKCS1v15(rand io.Reader, priv *PrivateKey, ciphertext []byte) (valid int, em []byte, index int, err error) {
- k := (priv.N.BitLen() + 7) / 8
+ k := priv.Size()
if k < 11 {
err = ErrDecryption
return
}
- c := new(big.Int).SetBytes(ciphertext)
- m, err := decrypt(rand, priv, c)
- if err != nil {
- return
+ if boring.Enabled {
+ boringFakeRandomBlind(rand, priv)
+ var bkey *boring.PrivateKeyRSA
+ bkey, err = boringPrivateKey(priv)
+ if err != nil {
+ return
+ }
+ em, err = boring.DecryptRSANoPadding(bkey, ciphertext)
+ if err != nil {
+ return
+ }
+ } else {
+ c := new(big.Int).SetBytes(ciphertext)
+ var m *big.Int
+ m, err = decrypt(rand, priv, c)
+ if err != nil {
+ return
+ }
+ em = leftPad(m.Bytes(), k)
}
- em = leftPad(m.Bytes(), k)
firstByteIsZero := subtle.ConstantTimeByteEq(em[0], 0)
secondByteIsTwo := subtle.ConstantTimeByteEq(em[1], 2)
// messages is small, an attacker may be able to build a map from
// messages to signatures and identify the signed messages. As ever,
// signatures provide authenticity, not confidentiality.
-func SignPKCS1v15(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed []byte) ([]byte, error) {
+func SignPKCS1v15(random io.Reader, priv *PrivateKey, hash crypto.Hash, hashed []byte) ([]byte, error) {
hashLen, prefix, err := pkcs1v15HashInfo(hash, len(hashed))
if err != nil {
return nil, err
}
tLen := len(prefix) + hashLen
- k := (priv.N.BitLen() + 7) / 8
+ k := priv.Size()
if k < tLen+11 {
return nil, ErrMessageTooLong
}
+ if boring.Enabled {
+ boringFakeRandomBlind(random, priv)
+ bkey, err := boringPrivateKey(priv)
+ if err != nil {
+ return nil, err
+ }
+ return boring.SignRSAPKCS1v15(bkey, hash, hashed)
+ }
+
// EM = 0x00 || 0x01 || PS || 0x00 || T
em := make([]byte, k)
em[1] = 1
copy(em[k-hashLen:k], hashed)
m := new(big.Int).SetBytes(em)
- c, err := decryptAndCheck(rand, priv, m)
+ c, err := decryptAndCheck(random, priv, m)
if err != nil {
return nil, err
}
// returning a nil error. If hash is zero then hashed is used directly. This
// isn't advisable except for interoperability.
func VerifyPKCS1v15(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte) error {
+ if boring.Enabled {
+ bkey, err := boringPublicKey(pub)
+ if err != nil {
+ return err
+ }
+ if err := boring.VerifyRSAPKCS1v15(bkey, hash, hashed, sig); err != nil {
+ return ErrVerification
+ }
+ return nil
+ }
+
hashLen, prefix, err := pkcs1v15HashInfo(hash, len(hashed))
if err != nil {
return err
}
tLen := len(prefix) + hashLen
- k := (pub.N.BitLen() + 7) / 8
+ k := pub.Size()
if k < tLen+11 {
return ErrVerification
}
import (
"bytes"
"crypto"
+ "crypto/internal/boring"
"errors"
"hash"
"io"
// 3. If emLen < hLen + sLen + 2, output "encoding error" and stop.
if emLen < hLen+sLen+2 {
- return nil, errors.New("crypto/rsa: encoding error")
+ return nil, errors.New("crypto/rsa: key size too small for PSS signature")
}
em := make([]byte, emLen)
if err != nil {
return
}
+
+ if boring.Enabled {
+ boringFakeRandomBlind(rand, priv)
+ bkey, err := boringPrivateKey(priv)
+ if err != nil {
+ return nil, err
+ }
+ // Note: BoringCrypto takes care of the "AndCheck" part of "decryptAndCheck".
+ // (It's not just decrypt.)
+ s, err := boring.DecryptRSANoPadding(bkey, em)
+ if err != nil {
+ return nil, err
+ }
+ return s, nil
+ }
+
m := new(big.Int).SetBytes(em)
c, err := decryptAndCheck(rand, priv, m)
if err != nil {
hash = opts.Hash
}
+ if boring.Enabled && rand == boring.RandReader {
+ bkey, err := boringPrivateKey(priv)
+ if err != nil {
+ return nil, err
+ }
+ return boring.SignRSAPSS(bkey, hash, hashed, saltLength)
+ }
+
salt := make([]byte, saltLength)
if _, err := io.ReadFull(rand, salt); err != nil {
return nil, err
// verifyPSS verifies a PSS signature with the given salt length.
func verifyPSS(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte, saltLen int) error {
+ if boring.Enabled {
+ bkey, err := boringPublicKey(pub)
+ if err != nil {
+ return err
+ }
+ if err := boring.VerifyRSAPSS(bkey, hash, hashed, sig, saltLen); err != nil {
+ return ErrVerification
+ }
+ return nil
+ }
nBits := pub.N.BitLen()
if len(sig) != (nBits+7)/8 {
return ErrVerification
import (
"crypto"
+ "crypto/internal/boring"
"crypto/rand"
"crypto/subtle"
"errors"
"io"
"math"
"math/big"
+ "unsafe"
)
var bigZero = big.NewInt(0)
type PublicKey struct {
N *big.Int // modulus
E int // public exponent
+
+ boring unsafe.Pointer
}
+ // Size returns the modulus size in bytes. Raw signatures and ciphertexts
+ // for or by this public key will have the same size.
+ func (pub *PublicKey) Size() int {
+ return (pub.N.BitLen() + 7) / 8
+ }
+
// OAEPOptions is an interface for passing options to OAEP decryption using the
// crypto.Decrypter interface.
type OAEPOptions struct {
// Precomputed contains precomputed values that speed up private
// operations, if available.
Precomputed PrecomputedValues
+
+ boring unsafe.Pointer
}
// Public returns the public key corresponding to priv.
// [1] US patent 4405829 (1972, expired)
// [2] http://www.cacr.math.uwaterloo.ca/techreports/2006/cacr2006-16.pdf
func GenerateMultiPrimeKey(random io.Reader, nprimes int, bits int) (*PrivateKey, error) {
+ if boring.Enabled && random == boring.RandReader && nprimes == 2 && (bits == 2048 || bits == 3072) {
+ N, E, D, P, Q, Dp, Dq, Qinv, err := boring.GenerateKeyRSA(bits)
+ if err != nil {
+ return nil, err
+ }
+ e64 := E.Int64()
+ if !E.IsInt64() || int64(int(e64)) != e64 {
+ return nil, errors.New("crypto/rsa: generated key exponent too large")
+ }
+ key := &PrivateKey{
+ PublicKey: PublicKey{
+ N: N,
+ E: int(e64),
+ },
+ D: D,
+ Primes: []*big.Int{P, Q},
+ Precomputed: PrecomputedValues{
+ Dp: Dp,
+ Dq: Dq,
+ Qinv: Qinv,
+ CRTValues: make([]CRTValue, 0), // non-nil, to match Precompute
+ },
+ }
+ return key, nil
+ }
+
priv := new(PrivateKey)
priv.E = 65537
continue NextSetOfPrimes
}
- g := new(big.Int)
priv.D = new(big.Int)
e := big.NewInt(int64(priv.E))
- g.GCD(priv.D, nil, e, totient)
+ ok := priv.D.ModInverse(e, totient)
- if g.Cmp(bigOne) == 0 {
- if priv.D.Sign() < 0 {
- priv.D.Add(priv.D, totient)
- }
+ if ok != nil {
priv.Primes = primes
priv.N = n
-
break
}
}
var ErrMessageTooLong = errors.New("crypto/rsa: message too long for RSA public key size")
func encrypt(c *big.Int, pub *PublicKey, m *big.Int) *big.Int {
+ boring.Unreachable()
e := big.NewInt(int64(pub.E))
c.Exp(m, e, pub.N)
return c
return nil, err
}
hash.Reset()
- k := (pub.N.BitLen() + 7) / 8
+ k := pub.Size()
if len(msg) > k-2*hash.Size()-2 {
return nil, ErrMessageTooLong
}
+ if boring.Enabled && random == boring.RandReader {
+ bkey, err := boringPublicKey(pub)
+ if err != nil {
+ return nil, err
+ }
+ return boring.EncryptRSAOAEP(hash, bkey, msg, label)
+ }
+ boring.UnreachableExceptTests()
+
hash.Write(label)
lHash := hash.Sum(nil)
hash.Reset()
mgf1XOR(db, hash, seed)
mgf1XOR(seed, hash, db)
- m := new(big.Int)
- m.SetBytes(em)
- c := encrypt(new(big.Int), pub, m)
- out := c.Bytes()
+ var out []byte
+ if boring.Enabled {
+ var bkey *boring.PublicKeyRSA
+ bkey, err = boringPublicKey(pub)
+ if err != nil {
+ return nil, err
+ }
+ c, err := boring.EncryptRSANoPadding(bkey, em)
+ if err != nil {
+ return nil, err
+ }
+ out = c
+ } else {
+ m := new(big.Int)
+ m.SetBytes(em)
+ c := encrypt(new(big.Int), pub, m)
+ out = c.Bytes()
+ }
if len(out) < k {
// If the output is too small, we need to left-pad with zeros.
// It is deliberately vague to avoid adaptive attacks.
var ErrVerification = errors.New("crypto/rsa: verification error")
- // modInverse returns ia, the inverse of a in the multiplicative group of prime
- // order n. It requires that a be a member of the group (i.e. less than n).
- func modInverse(a, n *big.Int) (ia *big.Int, ok bool) {
- g := new(big.Int)
- x := new(big.Int)
- g.GCD(x, nil, a, n)
- if g.Cmp(bigOne) != 0 {
- // In this case, a and n aren't coprime and we cannot calculate
- // the inverse. This happens because the values of n are nearly
- // prime (being the product of two primes) rather than truly
- // prime.
- return
- }
-
- if x.Cmp(bigOne) < 0 {
- // 0 is not the multiplicative inverse of any element so, if x
- // < 1, then x is negative.
- x.Add(x, n)
- }
-
- return x, true
- }
-
// Precompute performs some calculations that speed up private key operations
// in the future.
func (priv *PrivateKey) Precompute() {
// decrypt performs an RSA decryption, resulting in a plaintext integer. If a
// random source is given, RSA blinding is used.
func decrypt(random io.Reader, priv *PrivateKey, c *big.Int) (m *big.Int, err error) {
+ if len(priv.Primes) <= 2 {
+ boring.Unreachable()
+ }
// TODO(agl): can we get away with reusing blinds?
if c.Cmp(priv.N) > 0 {
err = ErrDecryption
// by multiplying by the multiplicative inverse of r.
var r *big.Int
-
+ ir = new(big.Int)
for {
r, err = rand.Int(random, priv.N)
if err != nil {
if r.Cmp(bigZero) == 0 {
r = bigOne
}
- var ok bool
- ir, ok = modInverse(r, priv.N)
- if ok {
+ ok := ir.ModInverse(r, priv.N)
+ if ok != nil {
break
}
}
if err := checkPub(&priv.PublicKey); err != nil {
return nil, err
}
- k := (priv.N.BitLen() + 7) / 8
+ k := priv.Size()
if len(ciphertext) > k ||
k < hash.Size()*2+2 {
return nil, ErrDecryption
}
+ if boring.Enabled {
+ boringFakeRandomBlind(random, priv)
+ bkey, err := boringPrivateKey(priv)
+ if err != nil {
+ return nil, err
+ }
+ out, err := boring.DecryptRSAOAEP(hash, bkey, ciphertext, label)
+ if err != nil {
+ return nil, ErrDecryption
+ }
+ return out, nil
+ }
c := new(big.Int).SetBytes(ciphertext)
m, err := decrypt(random, priv, c)
// implements encoding.BinaryMarshaler and encoding.BinaryUnmarshaler to
// marshal and unmarshal the internal state of the hash.
func New() hash.Hash {
+ if boringEnabled {
+ return boringNewSHA1()
+ }
d := new(digest)
d.Reset()
return d
func (d *digest) BlockSize() int { return BlockSize }
func (d *digest) Write(p []byte) (nn int, err error) {
+ boringUnreachable()
nn = len(p)
d.len += uint64(nn)
if d.nx > 0 {
return
}
- func (d0 *digest) Sum(in []byte) []byte {
+ func (d *digest) Sum(in []byte) []byte {
+ boringUnreachable()
- // Make a copy of d0 so that caller can keep writing and summing.
- d := *d0
- hash := d.checkSum()
+ // Make a copy of d so that caller can keep writing and summing.
+ d0 := *d
+ hash := d0.checkSum()
return append(in, hash[:]...)
}
}
// ConstantTimeSum computes the same result of Sum() but in constant time
- func (d0 *digest) ConstantTimeSum(in []byte) []byte {
- d := *d0
- hash := d.constSum()
+ func (d *digest) ConstantTimeSum(in []byte) []byte {
+ d0 := *d
+ hash := d0.constSum()
return append(in, hash[:]...)
}
// Sum returns the SHA-1 checksum of the data.
func Sum(data []byte) [Size]byte {
+ if boringEnabled {
+ h := New()
+ h.Write(data)
+ var ret [Size]byte
+ h.Sum(ret[:0])
+ return ret
+ }
var d digest
d.Reset()
d.Write(data)
import (
"crypto"
+ "crypto/internal/boring"
"errors"
"hash"
)
return nil
}
+ func putUint32(x []byte, s uint32) {
+ _ = x[3]
+ x[0] = byte(s >> 24)
+ x[1] = byte(s >> 16)
+ x[2] = byte(s >> 8)
+ x[3] = byte(s)
+ }
+
+ func putUint64(x []byte, s uint64) {
+ _ = x[7]
+ x[0] = byte(s >> 56)
+ x[1] = byte(s >> 48)
+ x[2] = byte(s >> 40)
+ x[3] = byte(s >> 32)
+ x[4] = byte(s >> 24)
+ x[5] = byte(s >> 16)
+ x[6] = byte(s >> 8)
+ x[7] = byte(s)
+ }
+
func appendUint64(b []byte, x uint64) []byte {
- a := [8]byte{
- byte(x >> 56),
- byte(x >> 48),
- byte(x >> 40),
- byte(x >> 32),
- byte(x >> 24),
- byte(x >> 16),
- byte(x >> 8),
- byte(x),
- }
+ var a [8]byte
+ putUint64(a[:], x)
return append(b, a[:]...)
}
func appendUint32(b []byte, x uint32) []byte {
- a := [4]byte{
- byte(x >> 24),
- byte(x >> 16),
- byte(x >> 8),
- byte(x),
- }
+ var a [4]byte
+ putUint32(a[:], x)
return append(b, a[:]...)
}
// encoding.BinaryUnmarshaler to marshal and unmarshal the internal
// state of the hash.
func New() hash.Hash {
+ if boring.Enabled {
+ return boring.NewSHA256()
+ }
d := new(digest)
d.Reset()
return d
// New224 returns a new hash.Hash computing the SHA224 checksum.
func New224() hash.Hash {
+ if boring.Enabled {
+ return boring.NewSHA224()
+ }
d := new(digest)
d.is224 = true
d.Reset()
func (d *digest) BlockSize() int { return BlockSize }
func (d *digest) Write(p []byte) (nn int, err error) {
+ boring.Unreachable()
nn = len(p)
d.len += uint64(nn)
if d.nx > 0 {
return
}
- func (d0 *digest) Sum(in []byte) []byte {
+ func (d *digest) Sum(in []byte) []byte {
+ boring.Unreachable()
- // Make a copy of d0 so that caller can keep writing and summing.
- d := *d0
- hash := d.checkSum()
- if d.is224 {
+ // Make a copy of d so that caller can keep writing and summing.
+ d0 := *d
+ hash := d0.checkSum()
+ if d0.is224 {
return append(in, hash[:Size224]...)
}
return append(in, hash[:]...)
// Length in bits.
len <<= 3
- for i := uint(0); i < 8; i++ {
- tmp[i] = byte(len >> (56 - 8*i))
- }
+ putUint64(tmp[:], len)
d.Write(tmp[0:8])
if d.nx != 0 {
panic("d.nx != 0")
}
- h := d.h[:]
- if d.is224 {
- h = d.h[:7]
- }
-
var digest [Size]byte
- for i, s := range h {
- digest[i*4] = byte(s >> 24)
- digest[i*4+1] = byte(s >> 16)
- digest[i*4+2] = byte(s >> 8)
- digest[i*4+3] = byte(s)
+
+ putUint32(digest[0:], d.h[0])
+ putUint32(digest[4:], d.h[1])
+ putUint32(digest[8:], d.h[2])
+ putUint32(digest[12:], d.h[3])
+ putUint32(digest[16:], d.h[4])
+ putUint32(digest[20:], d.h[5])
+ putUint32(digest[24:], d.h[6])
+ if !d.is224 {
+ putUint32(digest[28:], d.h[7])
}
return digest
// Sum256 returns the SHA256 checksum of the data.
func Sum256(data []byte) [Size]byte {
+ if boring.Enabled {
+ h := New()
+ h.Write(data)
+ var ret [Size]byte
+ h.Sum(ret[:0])
+ return ret
+ }
var d digest
d.Reset()
d.Write(data)
// Sum224 returns the SHA224 checksum of the data.
func Sum224(data []byte) (sum224 [Size224]byte) {
+ if boring.Enabled {
+ h := New224()
+ h.Write(data)
+ var ret [Size224]byte
+ h.Sum(ret[:0])
+ return ret
+ }
var d digest
d.is224 = true
d.Reset()
import (
"crypto"
+ "crypto/internal/boring"
"errors"
"hash"
)
return nil
}
+ func putUint64(x []byte, s uint64) {
+ _ = x[7]
+ x[0] = byte(s >> 56)
+ x[1] = byte(s >> 48)
+ x[2] = byte(s >> 40)
+ x[3] = byte(s >> 32)
+ x[4] = byte(s >> 24)
+ x[5] = byte(s >> 16)
+ x[6] = byte(s >> 8)
+ x[7] = byte(s)
+ }
+
func appendUint64(b []byte, x uint64) []byte {
- a := [8]byte{
- byte(x >> 56),
- byte(x >> 48),
- byte(x >> 40),
- byte(x >> 32),
- byte(x >> 24),
- byte(x >> 16),
- byte(x >> 8),
- byte(x),
- }
+ var a [8]byte
+ putUint64(a[:], x)
return append(b, a[:]...)
}
// New returns a new hash.Hash computing the SHA-512 checksum.
func New() hash.Hash {
+ if boring.Enabled {
+ return boring.NewSHA512()
+ }
d := &digest{function: crypto.SHA512}
d.Reset()
return d
// New384 returns a new hash.Hash computing the SHA-384 checksum.
func New384() hash.Hash {
+ if boring.Enabled {
+ return boring.NewSHA384()
+ }
d := &digest{function: crypto.SHA384}
d.Reset()
return d
func (d *digest) BlockSize() int { return BlockSize }
func (d *digest) Write(p []byte) (nn int, err error) {
+ if d.function != crypto.SHA512_224 && d.function != crypto.SHA512_256 {
+ boring.Unreachable()
+ }
nn = len(p)
d.len += uint64(nn)
if d.nx > 0 {
return
}
- func (d0 *digest) Sum(in []byte) []byte {
- if d0.function != crypto.SHA512_224 && d0.function != crypto.SHA512_256 {
+ func (d *digest) Sum(in []byte) []byte {
+ if d.function != crypto.SHA512_224 && d.function != crypto.SHA512_256 {
+ boring.Unreachable()
+ }
- // Make a copy of d0 so that caller can keep writing and summing.
- d := new(digest)
- *d = *d0
- hash := d.checkSum()
- switch d.function {
+ // Make a copy of d so that caller can keep writing and summing.
+ d0 := new(digest)
+ *d0 = *d
+ hash := d0.checkSum()
+ switch d0.function {
case crypto.SHA384:
return append(in, hash[:Size384]...)
case crypto.SHA512_224:
// Length in bits.
len <<= 3
- for i := uint(0); i < 16; i++ {
- tmp[i] = byte(len >> (120 - 8*i))
- }
+ putUint64(tmp[0:], 0) // upper 64 bits are always zero, because len variable has type uint64
+ putUint64(tmp[8:], len)
d.Write(tmp[0:16])
if d.nx != 0 {
panic("d.nx != 0")
}
- h := d.h[:]
- if d.function == crypto.SHA384 {
- h = d.h[:6]
- }
-
var digest [Size]byte
- for i, s := range h {
- digest[i*8] = byte(s >> 56)
- digest[i*8+1] = byte(s >> 48)
- digest[i*8+2] = byte(s >> 40)
- digest[i*8+3] = byte(s >> 32)
- digest[i*8+4] = byte(s >> 24)
- digest[i*8+5] = byte(s >> 16)
- digest[i*8+6] = byte(s >> 8)
- digest[i*8+7] = byte(s)
+ putUint64(digest[0:], d.h[0])
+ putUint64(digest[8:], d.h[1])
+ putUint64(digest[16:], d.h[2])
+ putUint64(digest[24:], d.h[3])
+ putUint64(digest[32:], d.h[4])
+ putUint64(digest[40:], d.h[5])
+ if d.function != crypto.SHA384 {
+ putUint64(digest[48:], d.h[6])
+ putUint64(digest[56:], d.h[7])
}
return digest
// Sum512 returns the SHA512 checksum of the data.
func Sum512(data []byte) [Size]byte {
+ if boring.Enabled {
+ h := New()
+ h.Write(data)
+ var ret [Size]byte
+ h.Sum(ret[:0])
+ return ret
+ }
d := digest{function: crypto.SHA512}
d.Reset()
d.Write(data)
// Sum384 returns the SHA384 checksum of the data.
func Sum384(data []byte) (sum384 [Size384]byte) {
+ if boring.Enabled {
+ h := New384()
+ h.Write(data)
+ var ret [Size384]byte
+ h.Sum(ret[:0])
+ return ret
+ }
d := digest{function: crypto.SHA384}
d.Reset()
d.Write(data)
"crypto/cipher"
"crypto/des"
"crypto/hmac"
+ "crypto/internal/boring"
"crypto/rc4"
"crypto/sha1"
"crypto/sha256"
copy(mac.key, key)
return mac
}
- return tls10MAC{hmac.New(newConstantTimeHash(sha1.New), key)}
+ h := sha1.New
+ if !boring.Enabled {
+ h = newConstantTimeHash(h)
+ }
+ return tls10MAC{hmac.New(h, key)}
}
// macSHA256 returns a SHA-256 based MAC. These are only supported in TLS 1.2
return result, err
}
+type gcmtls interface {
+ NewGCMTLS() (cipher.AEAD, error)
+}
+
func aeadAESGCM(key, fixedNonce []byte) cipher.AEAD {
aes, err := aes.NewCipher(key)
if err != nil {
panic(err)
}
- aead, err := cipher.NewGCM(aes)
+ var aead cipher.AEAD
+ if aesTLS, ok := aes.(gcmtls); ok {
+ aead, err = aesTLS.NewGCMTLS()
+ } else {
+ boring.Unreachable()
+ aead, err = cipher.NewGCM(aes)
+ }
if err != nil {
panic(err)
}
func (c *cthWrapper) Sum(b []byte) []byte { return c.h.ConstantTimeSum(b) }
func newConstantTimeHash(h func() hash.Hash) func() hash.Hash {
+ if boring.Enabled {
+ // The BoringCrypto SHA1 does not have a constant-time
+ // checksum function, so don't try to use it.
+ return h
+ }
return func() hash.Hash {
return &cthWrapper{h().(constantTimeHash)}
}
// A list of cipher suite IDs that are, or have been, implemented by this
// package.
//
- // Taken from http://www.iana.org/assignments/tls-parameters/tls-parameters.xml
+ // Taken from https://www.iana.org/assignments/tls-parameters/tls-parameters.xml
const (
TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005
TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000a
)
// CurveID is the type of a TLS identifier for an elliptic curve. See
- // http://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-8
+ // https://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-8
type CurveID uint16
const (
)
// TLS Elliptic Curve Point Formats
- // http://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-9
+ // https://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-9
const (
pointFormatUncompressed uint8 = 0
)
signatureECDSA uint8 = 3
)
-// supportedSignatureAlgorithms contains the signature and hash algorithms that
+// defaultSupportedSignatureAlgorithms contains the signature and hash algorithms that
// the code advertises as supported in a TLS 1.2 ClientHello and in a TLS 1.2
// CertificateRequest. The two fields are merged to match with TLS 1.3.
// Note that in TLS 1.2, the ECDSA algorithms are not constrained to P-256, etc.
-var supportedSignatureAlgorithms = []SignatureScheme{
+var defaultSupportedSignatureAlgorithms = []SignatureScheme{
PKCS1WithSHA256,
ECDSAWithP256AndSHA256,
PKCS1WithSHA384,
SignedCertificateTimestamps [][]byte // SCTs from the server, if any
OCSPResponse []byte // stapled OCSP response from server, if any
+ // ExportKeyingMaterial returns length bytes of exported key material as
+ // defined in https://tools.ietf.org/html/rfc5705. If context is nil, it is
+ // not used as part of the seed. If Config.Renegotiation was set to allow
+ // renegotiation, this function will always return nil, false.
+ ExportKeyingMaterial func(label string, context []byte, length int) ([]byte, bool)
+
// TLSUnique contains the "tls-unique" channel binding value (see RFC
// 5929, section 3). For resumed sessions this value will be nil
// because resumption does not include enough context (see
//
// If normal verification fails then the handshake will abort before
// considering this callback. If normal verification is disabled by
- // setting InsecureSkipVerify then this callback will be considered but
- // the verifiedChains argument will always be nil.
+ // setting InsecureSkipVerify, or (for a server) when ClientAuth is
+ // RequestClientCert or RequireAnyClientCert, then this callback will
+ // be considered but the verifiedChains argument will always be nil.
VerifyPeerCertificate func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error
// RootCAs defines the set of root certificate authorities
}
func (c *Config) cipherSuites() []uint16 {
+ if needFIPS() {
+ return fipsCipherSuites(c)
+ }
s := c.CipherSuites
if s == nil {
s = defaultCipherSuites()
}
func (c *Config) minVersion() uint16 {
+ if needFIPS() {
+ return fipsMinVersion(c)
+ }
if c == nil || c.MinVersion == 0 {
return minVersion
}
}
func (c *Config) maxVersion() uint16 {
+ if needFIPS() {
+ return fipsMaxVersion(c)
+ }
if c == nil || c.MaxVersion == 0 {
return maxVersion
}
var defaultCurvePreferences = []CurveID{X25519, CurveP256, CurveP384, CurveP521}
func (c *Config) curvePreferences() []CurveID {
+ if needFIPS() {
+ return fipsCurvePreferences(c)
+ }
if c == nil || len(c.CurvePreferences) == 0 {
return defaultCurvePreferences
}
}
if hello.vers >= VersionTLS12 {
- hello.supportedSignatureAlgorithms = supportedSignatureAlgorithms
+ hello.supportedSignatureAlgorithms = supportedSignatureAlgorithms()
+ }
+ if testingOnlyForceClientHelloSignatureAlgorithms != nil {
+ hello.supportedSignatureAlgorithms = testingOnlyForceClientHelloSignatureAlgorithms
}
return hello, nil
}
- // c.out.Mutex <= L; c.handshakeMutex <= L.
func (c *Conn) clientHandshake() error {
if c.config == nil {
c.config = defaultConfig()
}
}
+ c.ekm = ekmFromMasterSecret(c.vers, hs.suite, hs.masterSecret, hs.hello.random, hs.serverHello.random)
c.didResume = isResume
c.handshakeComplete = true
if !c.config.InsecureSkipVerify {
opts := x509.VerifyOptions{
+ IsBoring: isBoringCertificate,
+
Roots: c.config.RootCAs,
CurrentTime: c.config.time(),
DNSName: c.config.ServerName,
}
}
- if hs.serverHello.ocspStapling {
- msg, err = c.readHandshake()
- if err != nil {
- return err
- }
- cs, ok := msg.(*certificateStatusMsg)
- if !ok {
+ msg, err = c.readHandshake()
+ if err != nil {
+ return err
+ }
+
+ cs, ok := msg.(*certificateStatusMsg)
+ if ok {
+ // RFC4366 on Certificate Status Request:
+ // The server MAY return a "certificate_status" message.
+
+ if !hs.serverHello.ocspStapling {
+ // If a server returns a "CertificateStatus" message, then the
+ // server MUST have included an extension of type "status_request"
+ // with empty "extension_data" in the extended server hello.
+
c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(cs, msg)
+ return errors.New("tls: received unexpected CertificateStatus message")
}
hs.finishedHash.Write(cs.marshal())
if cs.statusType == statusTypeOCSP {
c.ocspResponse = cs.response
}
- }
- msg, err = c.readHandshake()
- if err != nil {
- return err
+ msg, err = c.readHandshake()
+ if err != nil {
+ return err
+ }
}
keyAgreement := hs.suite.ka(c.vers)
}
// serverHandshake performs a TLS handshake as a server.
- // c.out.Mutex <= L; c.handshakeMutex <= L.
func (c *Conn) serverHandshake() error {
// If this is the first server handshake, we generate a random key to
// encrypt the tickets with.
return err
}
}
+
+ c.ekm = ekmFromMasterSecret(c.vers, hs.suite, hs.masterSecret, hs.clientHello.random, hs.hello.random)
c.handshakeComplete = true
return nil
}
if c.vers >= VersionTLS12 {
certReq.hasSignatureAndHash = true
- certReq.supportedSignatureAlgorithms = supportedSignatureAlgorithms
+ certReq.supportedSignatureAlgorithms = supportedSignatureAlgorithms()
}
// An empty list of certificateAuthorities signals to
var sigType uint8
if certVerify.hasSignatureAndHash {
signatureAlgorithm = certVerify.signatureAlgorithm
- if !isSupportedSignatureAlgorithm(signatureAlgorithm, supportedSignatureAlgorithms) {
+ if !isSupportedSignatureAlgorithm(signatureAlgorithm, supportedSignatureAlgorithms()) {
return errors.New("tls: unsupported hash function for client certificate")
}
sigType = signatureFromSignatureScheme(signatureAlgorithm)
if c.config.ClientAuth >= VerifyClientCertIfGiven && len(certs) > 0 {
opts := x509.VerifyOptions{
+ IsBoring: isBoringCertificate,
+
Roots: c.config.ClientCAs,
CurrentTime: c.config.time(),
Intermediates: x509.NewCertPool(),
// client's CertificateVerify with, or an error if none can be found.
func (h finishedHash) selectClientCertSignatureAlgorithm(serverList []SignatureScheme, sigType uint8) (SignatureScheme, error) {
for _, v := range serverList {
- if signatureFromSignatureScheme(v) == sigType && isSupportedSignatureAlgorithm(v, supportedSignatureAlgorithms) {
+ if signatureFromSignatureScheme(v) == sigType && isSupportedSignatureAlgorithm(v, supportedSignatureAlgorithms()) {
return v, nil
}
}
func (h *finishedHash) discardHandshakeBuffer() {
h.buffer = nil
}
+
+ // noExportedKeyingMaterial is used as a value of
+ // ConnectionState.ExportKeyingMaterial when renegotiation is enabled and thus
+ // we wish to fail all key-material export requests.
+ func noExportedKeyingMaterial(label string, context []byte, length int) ([]byte, bool) {
+ return nil, false
+ }
+
+ // ekmFromMasterSecret generates exported keying material as defined in
+ // https://tools.ietf.org/html/rfc5705.
+ func ekmFromMasterSecret(version uint16, suite *cipherSuite, masterSecret, clientRandom, serverRandom []byte) func(string, []byte, int) ([]byte, bool) {
+ return func(label string, context []byte, length int) ([]byte, bool) {
+ switch label {
+ case "client finished", "server finished", "master secret", "key expansion":
+ // These values are reserved and may not be used.
+ return nil, false
+ }
+
+ seedLen := len(serverRandom) + len(clientRandom)
+ if context != nil {
+ seedLen += 2 + len(context)
+ }
+ seed := make([]byte, 0, seedLen)
+
+ seed = append(seed, clientRandom...)
+ seed = append(seed, serverRandom...)
+
+ if context != nil {
+ if len(context) >= 1<<16 {
+ return nil, false
+ }
+ seed = append(seed, byte(len(context)>>8), byte(len(context)))
+ seed = append(seed, context...)
+ }
+
+ keyMaterial := make([]byte, length)
+ prfForVersion(version, suite)(keyMaterial, masterSecret, []byte(label), seed)
+ return keyMaterial, true
+ }
+ }
import (
"bytes"
+ "encoding/asn1"
"errors"
"fmt"
"net"
"net/url"
"reflect"
"runtime"
+ "strconv"
"strings"
"time"
"unicode/utf8"
// name constraints, but leaf certificate contains a name of an
// unsupported or unconstrained type.
UnconstrainedName
- // TooManyConstraints results when the number of comparision operations
+ // TooManyConstraints results when the number of comparison operations
// needed to check a certificate exceeds the limit set by
// VerifyOptions.MaxConstraintComparisions. This limit exists to
// prevent pathological certificates from consuming excessive amounts of
// VerifyOptions contains parameters for Certificate.Verify. It's a structure
// because other PKIX verification APIs have ended up needing many options.
type VerifyOptions struct {
+ // IsBoring is a validity check for BoringCrypto.
+ // If not nil, it will be called to check whether a given certificate
+ // can be used for constructing verification chains.
+ IsBoring func(*Certificate) bool
+
DNSName string
Intermediates *CertPool
Roots *CertPool // if nil, the system roots are used
CurrentTime time.Time // if zero, the current time is used
- // KeyUsage specifies which Extended Key Usage values are acceptable.
- // An empty list means ExtKeyUsageServerAuth. Key usage is considered a
- // constraint down the chain which mirrors Windows CryptoAPI behavior,
- // but not the spec. To accept any key usage, include ExtKeyUsageAny.
+ // KeyUsage specifies which Extended Key Usage values are acceptable. A leaf
+ // certificate is accepted if it contains any of the listed values. An empty
+ // list means ExtKeyUsageServerAuth. To accept any key usage, include
+ // ExtKeyUsageAny.
+ //
+ // Certificate chains are required to nest extended key usage values,
+ // irrespective of this value. This matches the Windows CryptoAPI behavior,
+ // but not the spec.
KeyUsages []ExtKeyUsage
// MaxConstraintComparisions is the maximum number of comparisons to
// perform when checking a given certificate's name constraints. If
- // zero, a sensible default is used. This limit prevents pathalogical
+ // zero, a sensible default is used. This limit prevents pathological
// certificates from consuming excessive amounts of CPU time when
// validating.
MaxConstraintComparisions int
return nil
}
+ const (
+ checkingAgainstIssuerCert = iota
+ checkingAgainstLeafCert
+ )
+
// ekuPermittedBy returns true iff the given extended key usage is permitted by
// the given EKU from a certificate. Normally, this would be a simple
// comparison plus a special case for the “any” EKU. But, in order to support
// existing certificates, some exceptions are made.
- func ekuPermittedBy(eku, certEKU ExtKeyUsage) bool {
+ func ekuPermittedBy(eku, certEKU ExtKeyUsage, context int) bool {
if certEKU == ExtKeyUsageAny || eku == certEKU {
return true
}
eku = mapServerAuthEKUs(eku)
certEKU = mapServerAuthEKUs(certEKU)
- if eku == certEKU ||
- // ServerAuth in a CA permits ClientAuth in the leaf.
- (eku == ExtKeyUsageClientAuth && certEKU == ExtKeyUsageServerAuth) ||
+ if eku == certEKU {
+ return true
+ }
+
+ // If checking a requested EKU against the list in a leaf certificate there
+ // are fewer exceptions.
+ if context == checkingAgainstLeafCert {
+ return false
+ }
+
+ // ServerAuth in a CA permits ClientAuth in the leaf.
+ return (eku == ExtKeyUsageClientAuth && certEKU == ExtKeyUsageServerAuth) ||
// Any CA may issue an OCSP responder certificate.
eku == ExtKeyUsageOCSPSigning ||
// Code-signing CAs can use Microsoft's commercial and
// kernel-mode EKUs.
- ((eku == ExtKeyUsageMicrosoftCommercialCodeSigning || eku == ExtKeyUsageMicrosoftKernelCodeSigning) && certEKU == ExtKeyUsageCodeSigning) {
- return true
- }
-
- return false
+ (eku == ExtKeyUsageMicrosoftCommercialCodeSigning || eku == ExtKeyUsageMicrosoftKernelCodeSigning) && certEKU == ExtKeyUsageCodeSigning
}
// isValid performs validity checks on c given that it is a candidate to append
name := string(data)
mailbox, ok := parseRFC2821Mailbox(name)
if !ok {
- // This certificate should not have parsed.
- return errors.New("x509: internal error: rfc822Name SAN failed to parse")
+ return fmt.Errorf("x509: cannot parse rfc822Name %q", mailbox)
}
if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "email address", name, mailbox,
case nameTypeDNS:
name := string(data)
+ if _, ok := domainToReverseLabels(name); !ok {
+ return fmt.Errorf("x509: cannot parse dnsName %q", name)
+ }
+
if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "DNS name", name, name,
func(parsedName, constraint interface{}) (bool, error) {
return matchDomainConstraint(parsedName.(string), constraint.(string))
for _, caEKU := range c.ExtKeyUsage {
comparisonCount++
- if ekuPermittedBy(eku, caEKU) {
+ if ekuPermittedBy(eku, caEKU, checkingAgainstIssuerCert) {
continue NextEKU
}
}
}
}
+ if opts.IsBoring != nil && !opts.IsBoring(c) {
+ // IncompatibleUsage is not quite right here,
+ // but it's also the "no chains found" error
+ // and is close enough.
+ return CertificateInvalidError{c, IncompatibleUsage, ""}
+ }
+
return nil
}
+ // formatOID formats an ASN.1 OBJECT IDENTIFIER in the common, dotted style.
+ func formatOID(oid asn1.ObjectIdentifier) string {
+ ret := ""
+ for i, v := range oid {
+ if i > 0 {
+ ret += "."
+ }
+ ret += strconv.Itoa(v)
+ }
+ return ret
+ }
+
// Verify attempts to verify c by building one or more chains from c to a
// certificate in opts.Roots, using certificates in opts.Intermediates if
// needed. If successful, it returns one or more chains where the first
// If opts.Roots is nil and system roots are unavailable the returned error
// will be of type SystemRootsError.
//
- // WARNING: this doesn't do any revocation checking.
+ // Name constraints in the intermediates will be applied to all names claimed
+ // in the chain, not just opts.DNSName. Thus it is invalid for a leaf to claim
+ // example.com if an intermediate doesn't permit it, even if example.com is not
+ // the name being validated. Note that DirectoryName constraints are not
+ // supported.
+ //
+ // Extended Key Usage values are enforced down a chain, so an intermediate or
+ // root that enumerates EKUs prevents a leaf from asserting an EKU not in that
+ // list.
+ //
+ // WARNING: this function doesn't do any revocation checking.
func (c *Certificate) Verify(opts VerifyOptions) (chains [][]*Certificate, err error) {
// Platform-specific verification needs the ASN.1 contents so
// this makes the behavior consistent across platforms.
}
if checkEKU {
+ foundMatch := false
NextUsage:
for _, eku := range requestedKeyUsages {
for _, leafEKU := range c.ExtKeyUsage {
- if ekuPermittedBy(eku, leafEKU) {
- continue NextUsage
+ if ekuPermittedBy(eku, leafEKU, checkingAgainstLeafCert) {
+ foundMatch = true
+ break NextUsage
}
}
+ }
- oid, _ := oidFromExtKeyUsage(eku)
- return nil, CertificateInvalidError{c, IncompatibleUsage, fmt.Sprintf("%#v", oid)}
+ if !foundMatch {
+ msg := "leaf contains the following, recognized EKUs: "
+
+ for i, leafEKU := range c.ExtKeyUsage {
+ oid, ok := oidFromExtKeyUsage(leafEKU)
+ if !ok {
+ continue
+ }
+
+ if i > 0 {
+ msg += ", "
+ }
+ msg += formatOID(oid)
+ }
+
+ return nil, CertificateInvalidError{c, IncompatibleUsage, msg}
}
}
// L0 is the lowest level, core, nearly unavoidable packages.
"errors": {},
"io": {"errors", "sync", "sync/atomic"},
- "runtime": {"unsafe", "runtime/internal/atomic", "runtime/internal/sys"},
+ "runtime": {"unsafe", "runtime/internal/atomic", "runtime/internal/sys", "internal/cpu", "internal/bytealg"},
"runtime/internal/sys": {},
"runtime/internal/atomic": {"unsafe", "runtime/internal/sys"},
"internal/race": {"runtime", "unsafe"},
"sync": {"internal/race", "runtime", "sync/atomic", "unsafe"},
"sync/atomic": {"unsafe"},
"unsafe": {},
- "internal/cpu": {"runtime"},
+ "internal/cpu": {},
+ "internal/bytealg": {"unsafe", "internal/cpu"},
"L0": {
"errors",
"sync/atomic",
"unsafe",
"internal/cpu",
+ "internal/bytealg",
},
// L1 adds simple functions and strings processing,
"math/bits": {},
"math/cmplx": {"math"},
"math/rand": {"L0", "math"},
- "strconv": {"L0", "unicode/utf8", "math"},
+ "strconv": {"L0", "unicode/utf8", "math", "math/bits"},
"unicode/utf16": {},
"unicode/utf8": {},
"reflect": {"L2"},
"sort": {"reflect"},
+ "crypto/internal/boring": {"L2", "C", "crypto", "crypto/cipher", "crypto/internal/boring/sig", "crypto/subtle", "encoding/asn1", "hash", "math/big"},
+ "crypto/internal/boring/fipstls": {"sync/atomic"},
+ "crypto/internal/cipherhw": {"crypto/internal/boring"},
+ "crypto/tls/fipsonly": {"crypto/internal/boring/fipstls", "crypto/internal/boring/sig"},
+
"L3": {
"L2",
"crypto",
"crypto/cipher",
+ "crypto/internal/boring",
+ "crypto/internal/boring/fipstls",
"crypto/internal/cipherhw",
"crypto/subtle",
"encoding/base32",
// End of linear dependency definitions.
// Operating system access.
- "syscall": {"L0", "internal/race", "internal/syscall/windows/sysdll", "unicode/utf16"},
+ "syscall": {"L0", "internal/race", "internal/syscall/windows/sysdll", "syscall/js", "unicode/utf16"},
+ "syscall/js": {"unsafe"},
"internal/syscall/unix": {"L0", "syscall"},
"internal/syscall/windows": {"L0", "syscall", "internal/syscall/windows/sysdll"},
"internal/syscall/windows/registry": {"L0", "syscall", "internal/syscall/windows/sysdll", "unicode/utf16"},
"syscall",
},
- "internal/poll": {"L0", "internal/race", "syscall", "time", "unicode/utf16", "unicode/utf8", "internal/syscall/windows"},
- "os": {"L1", "os", "syscall", "time", "internal/poll", "internal/syscall/windows"},
- "path/filepath": {"L2", "os", "syscall", "internal/syscall/windows"},
- "io/ioutil": {"L2", "os", "path/filepath", "time"},
- "os/exec": {"L2", "os", "context", "path/filepath", "syscall"},
- "os/signal": {"L2", "os", "syscall"},
+ "internal/poll": {"L0", "internal/race", "syscall", "time", "unicode/utf16", "unicode/utf8", "internal/syscall/windows"},
+ "internal/testlog": {"L0"},
+ "os": {"L1", "os", "syscall", "time", "internal/poll", "internal/syscall/windows", "internal/syscall/unix", "internal/testlog"},
+ "path/filepath": {"L2", "os", "syscall", "internal/syscall/windows"},
+ "io/ioutil": {"L2", "os", "path/filepath", "time"},
+ "os/exec": {"L2", "os", "context", "path/filepath", "syscall"},
+ "os/signal": {"L2", "os", "syscall"},
// OS enables basic operating system functionality,
// but not direct use of package syscall, nor os/signal.
"regexp/syntax": {"L2"},
"runtime/debug": {"L2", "fmt", "io/ioutil", "os", "time"},
"runtime/pprof": {"L2", "compress/gzip", "context", "encoding/binary", "fmt", "io/ioutil", "os", "text/tabwriter", "time"},
- "runtime/trace": {"L0"},
+ "runtime/trace": {"L0", "context", "fmt"},
"text/tabwriter": {"L2"},
"testing": {"L2", "flag", "fmt", "internal/race", "os", "runtime/debug", "runtime/pprof", "runtime/trace", "time"},
"go/importer": {"L4", "go/build", "go/internal/gccgoimporter", "go/internal/gcimporter", "go/internal/srcimporter", "go/token", "go/types"},
"go/internal/gcimporter": {"L4", "OS", "go/build", "go/constant", "go/token", "go/types", "text/scanner"},
"go/internal/gccgoimporter": {"L4", "OS", "debug/elf", "go/constant", "go/token", "go/types", "text/scanner"},
- "go/internal/srcimporter": {"L4", "fmt", "go/ast", "go/build", "go/parser", "go/token", "go/types", "path/filepath"},
+ "go/internal/srcimporter": {"L4", "OS", "fmt", "go/ast", "go/build", "go/parser", "go/token", "go/types", "path/filepath"},
"go/types": {"L4", "GOPARSER", "container/heap", "go/constant"},
// One of a kind.
- "archive/tar": {"L4", "OS", "syscall", "os/user"},
- "archive/zip": {"L4", "OS", "compress/flate"},
- "container/heap": {"sort"},
- "compress/bzip2": {"L4"},
- "compress/flate": {"L4"},
- "compress/gzip": {"L4", "compress/flate"},
- "compress/lzw": {"L4"},
- "compress/zlib": {"L4", "compress/flate"},
- "context": {"errors", "fmt", "reflect", "sync", "time"},
- "database/sql": {"L4", "container/list", "context", "database/sql/driver", "database/sql/internal"},
- "database/sql/driver": {"L4", "context", "time", "database/sql/internal"},
- "debug/dwarf": {"L4"},
- "debug/elf": {"L4", "OS", "debug/dwarf", "compress/zlib"},
- "debug/gosym": {"L4"},
- "debug/macho": {"L4", "OS", "debug/dwarf"},
- "debug/pe": {"L4", "OS", "debug/dwarf"},
- "debug/plan9obj": {"L4", "OS"},
- "encoding": {"L4"},
- "encoding/ascii85": {"L4"},
- "encoding/asn1": {"L4", "math/big"},
- "encoding/csv": {"L4"},
- "encoding/gob": {"L4", "OS", "encoding"},
- "encoding/hex": {"L4"},
- "encoding/json": {"L4", "encoding"},
- "encoding/pem": {"L4"},
- "encoding/xml": {"L4", "encoding"},
- "flag": {"L4", "OS"},
- "go/build": {"L4", "OS", "GOPARSER"},
- "html": {"L4"},
- "image/draw": {"L4", "image/internal/imageutil"},
- "image/gif": {"L4", "compress/lzw", "image/color/palette", "image/draw"},
- "image/internal/imageutil": {"L4"},
- "image/jpeg": {"L4", "image/internal/imageutil"},
- "image/png": {"L4", "compress/zlib"},
- "index/suffixarray": {"L4", "regexp"},
- "internal/singleflight": {"sync"},
- "internal/trace": {"L4", "OS"},
- "math/big": {"L4"},
- "mime": {"L4", "OS", "syscall", "internal/syscall/windows/registry"},
- "mime/quotedprintable": {"L4"},
- "net/internal/socktest": {"L4", "OS", "syscall", "internal/syscall/windows"},
- "net/url": {"L4"},
- "plugin": {"L0", "OS", "CGO"},
+ "archive/tar": {"L4", "OS", "syscall", "os/user"},
+ "archive/zip": {"L4", "OS", "compress/flate"},
+ "container/heap": {"sort"},
+ "compress/bzip2": {"L4"},
+ "compress/flate": {"L4"},
+ "compress/gzip": {"L4", "compress/flate"},
+ "compress/lzw": {"L4"},
+ "compress/zlib": {"L4", "compress/flate"},
+ "context": {"errors", "fmt", "reflect", "sync", "time"},
+ "database/sql": {"L4", "container/list", "context", "database/sql/driver", "database/sql/internal"},
+ "database/sql/driver": {"L4", "context", "time", "database/sql/internal"},
+ "debug/dwarf": {"L4"},
+ "debug/elf": {"L4", "OS", "debug/dwarf", "compress/zlib"},
+ "debug/gosym": {"L4"},
+ "debug/macho": {"L4", "OS", "debug/dwarf"},
+ "debug/pe": {"L4", "OS", "debug/dwarf"},
+ "debug/plan9obj": {"L4", "OS"},
+ "encoding": {"L4"},
+ "encoding/ascii85": {"L4"},
+ "encoding/asn1": {"L4", "math/big"},
+ "encoding/csv": {"L4"},
+ "encoding/gob": {"L4", "OS", "encoding"},
+ "encoding/hex": {"L4"},
+ "encoding/json": {"L4", "encoding"},
+ "encoding/pem": {"L4"},
+ "encoding/xml": {"L4", "encoding"},
+ "flag": {"L4", "OS"},
+ "go/build": {"L4", "OS", "GOPARSER"},
+ "html": {"L4"},
+ "image/draw": {"L4", "image/internal/imageutil"},
+ "image/gif": {"L4", "compress/lzw", "image/color/palette", "image/draw"},
+ "image/internal/imageutil": {"L4"},
+ "image/jpeg": {"L4", "image/internal/imageutil"},
+ "image/png": {"L4", "compress/zlib"},
+ "index/suffixarray": {"L4", "regexp"},
+ "internal/singleflight": {"sync"},
+ "internal/trace": {"L4", "OS"},
+ "math/big": {"L4"},
+ "mime": {"L4", "OS", "syscall", "internal/syscall/windows/registry"},
+ "mime/quotedprintable": {"L4"},
+ "net/internal/socktest": {"L4", "OS", "syscall", "internal/syscall/windows"},
+ "net/url": {"L4"},
+ "plugin": {"L0", "OS", "CGO"},
"runtime/pprof/internal/profile": {"L4", "OS", "compress/gzip", "regexp"},
- "testing/internal/testdeps": {"L4", "runtime/pprof", "regexp"},
+ "testing/internal/testdeps": {"L4", "internal/testlog", "runtime/pprof", "regexp"},
"text/scanner": {"L4", "OS"},
"text/template/parse": {"L4"},
"runtime/msan": {"C"},
// Plan 9 alone needs io/ioutil and os.
- "os/user": {"L4", "CGO", "io/ioutil", "os", "syscall"},
+ "os/user": {"L4", "CGO", "io/ioutil", "os", "syscall", "internal/syscall/windows", "internal/syscall/windows/registry"},
// Internal package used only for testing.
- "os/signal/internal/pty": {"CGO", "fmt", "os"},
+ "os/signal/internal/pty": {"CGO", "fmt", "os", "syscall"},
// Basic networking.
// Because net must be used by any package that wants to
"context", "math/rand", "os", "reflect", "sort", "syscall", "time",
"internal/nettrace", "internal/poll",
"internal/syscall/windows", "internal/singleflight", "internal/race",
- "golang_org/x/net/lif", "golang_org/x/net/route",
+ "golang_org/x/net/dns/dnsmessage", "golang_org/x/net/lif", "golang_org/x/net/route",
},
// NET enables use of basic network-related packages.
// Random byte, number generation.
// This would be part of core crypto except that it imports
// math/big, which imports fmt.
- "crypto/rand": {"L4", "CRYPTO", "OS", "math/big", "syscall", "internal/syscall/unix"},
+ "crypto/rand": {"L4", "CRYPTO", "OS", "math/big", "syscall", "syscall/js", "internal/syscall/unix"},
// Mathematical crypto: dependencies on fmt (L4) and math/big.
// We could avoid some of the fmt, but math/big imports fmt anyway.
"context",
"crypto/rand",
"crypto/tls",
+ "golang_org/x/net/http/httpguts",
"golang_org/x/net/http2/hpack",
"golang_org/x/net/idna",
- "golang_org/x/net/lex/httplex",
- "golang_org/x/net/proxy",
"golang_org/x/text/unicode/norm",
"golang_org/x/text/width",
"internal/nettrace",
"net/http/cgi": {"L4", "NET", "OS", "crypto/tls", "net/http", "regexp"},
"net/http/cookiejar": {"L4", "NET", "net/http"},
"net/http/fcgi": {"L4", "NET", "OS", "context", "net/http", "net/http/cgi"},
- "net/http/httptest": {"L4", "NET", "OS", "crypto/tls", "flag", "net/http", "net/http/internal", "crypto/x509"},
- "net/http/httputil": {"L4", "NET", "OS", "context", "net/http", "net/http/internal"},
- "net/http/pprof": {"L4", "OS", "html/template", "net/http", "runtime/pprof", "runtime/trace"},
- "net/rpc": {"L4", "NET", "encoding/gob", "html/template", "net/http"},
- "net/rpc/jsonrpc": {"L4", "NET", "encoding/json", "net/rpc"},
+ "net/http/httptest": {
+ "L4", "NET", "OS", "crypto/tls", "flag", "net/http", "net/http/internal", "crypto/x509",
+ "golang_org/x/net/http/httpguts",
+ },
+ "net/http/httputil": {"L4", "NET", "OS", "context", "net/http", "net/http/internal"},
+ "net/http/pprof": {"L4", "OS", "html/template", "net/http", "runtime/pprof", "runtime/trace"},
+ "net/rpc": {"L4", "NET", "encoding/gob", "html/template", "net/http"},
+ "net/rpc/jsonrpc": {"L4", "NET", "encoding/json", "net/rpc"},
}
// isMacro reports whether p is a package dependency macro