Cypherpunks.ru repositories - gostls13.git/commitdiff
runtime: break out system-specific constants into package sys
author    Michael Matloob <matloob@golang.org>
          Wed, 11 Nov 2015 17:39:30 +0000 (12:39 -0500)
committer Michael Matloob <matloob@golang.org>
          Thu, 12 Nov 2015 17:04:45 +0000 (17:04 +0000)
runtime/internal/sys will hold system-, architecture- and config-
specific constants.

Updates #11647

Change-Id: I6db29c312556087a42e8d2bdd9af40d157c56b54
Reviewed-on: https://go-review.googlesource.com/16817
Reviewed-by: Russ Cox <rsc@golang.org>
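For orientation, here is a rough sketch of what one of the new per-architecture files, src/runtime/internal/sys/arch_amd64.go, plausibly looks like after this change. The values are copied from the deleted src/runtime/arch_amd64.go further down in this diff; the exported names are inferred from the sys.* references (sys.TheChar, sys.BigEndian, sys.MinFrameSize, sys.Uintreg, ...) used elsewhere in the CL and are not quoted verbatim from the new file.

// Hedged sketch only: values taken from the deleted src/runtime/arch_amd64.go,
// exported names inferred from the sys.* identifiers used in this diff.
package sys

const (
	TheChar       = '6'
	BigEndian     = 0
	CacheLineSize = 64
	PhysPageSize  = 4096
	PCQuantum     = 1
	Int64Align    = 8
	HugePageSize  = 1 << 21
	MinFrameSize  = 0
)

type Uintreg uint64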
101 files changed:
.gitignore
src/cmd/compile/internal/gc/racewalk.go
src/cmd/dist/build.go
src/cmd/dist/buildruntime.go
src/cmd/dist/deps.go
src/go/build/deps_test.go
src/runtime/alg.go
src/runtime/arch_386.go [deleted file]
src/runtime/arch_amd64.go [deleted file]
src/runtime/arch_amd64p32.go [deleted file]
src/runtime/arch_arm.go [deleted file]
src/runtime/arch_arm64.go [deleted file]
src/runtime/arch_ppc64.go [deleted file]
src/runtime/arch_ppc64le.go [deleted file]
src/runtime/cgocall.go
src/runtime/export_test.go
src/runtime/extern.go
src/runtime/hashmap.go
src/runtime/hashmap_fast.go
src/runtime/heapdump.go
src/runtime/iface.go
src/runtime/internal/sys/arch_386.go [new file with mode: 0644]
src/runtime/internal/sys/arch_amd64.go [new file with mode: 0644]
src/runtime/internal/sys/arch_amd64p32.go [new file with mode: 0644]
src/runtime/internal/sys/arch_arm.go [new file with mode: 0644]
src/runtime/internal/sys/arch_arm64.go [new file with mode: 0644]
src/runtime/internal/sys/arch_ppc64.go [new file with mode: 0644]
src/runtime/internal/sys/arch_ppc64le.go [new file with mode: 0644]
src/runtime/internal/sys/gengoos.go [moved from src/runtime/gengoos.go with 81% similarity]
src/runtime/internal/sys/stubs.go [new file with mode: 0644]
src/runtime/internal/sys/sys.go [new file with mode: 0644]
src/runtime/internal/sys/zgoarch_386.go [new file with mode: 0644]
src/runtime/internal/sys/zgoarch_amd64.go [new file with mode: 0644]
src/runtime/internal/sys/zgoarch_amd64p32.go [new file with mode: 0644]
src/runtime/internal/sys/zgoarch_arm.go [new file with mode: 0644]
src/runtime/internal/sys/zgoarch_arm64.go [new file with mode: 0644]
src/runtime/internal/sys/zgoarch_ppc64.go [new file with mode: 0644]
src/runtime/internal/sys/zgoarch_ppc64le.go [new file with mode: 0644]
src/runtime/internal/sys/zgoos_android.go [new file with mode: 0644]
src/runtime/internal/sys/zgoos_darwin.go [new file with mode: 0644]
src/runtime/internal/sys/zgoos_dragonfly.go [new file with mode: 0644]
src/runtime/internal/sys/zgoos_freebsd.go [new file with mode: 0644]
src/runtime/internal/sys/zgoos_linux.go [new file with mode: 0644]
src/runtime/internal/sys/zgoos_nacl.go [new file with mode: 0644]
src/runtime/internal/sys/zgoos_netbsd.go [new file with mode: 0644]
src/runtime/internal/sys/zgoos_openbsd.go [new file with mode: 0644]
src/runtime/internal/sys/zgoos_plan9.go [new file with mode: 0644]
src/runtime/internal/sys/zgoos_solaris.go [new file with mode: 0644]
src/runtime/internal/sys/zgoos_windows.go [new file with mode: 0644]
src/runtime/malloc.go
src/runtime/mbarrier.go
src/runtime/mbitmap.go
src/runtime/mem_bsd.go
src/runtime/mem_linux.go
src/runtime/mfinal.go
src/runtime/mgc.go
src/runtime/mgcmark.go
src/runtime/mgcsweep.go
src/runtime/mgcwork.go
src/runtime/mheap.go
src/runtime/mstats.go
src/runtime/mstkbar.go
src/runtime/os1_freebsd.go
src/runtime/os1_linux.go
src/runtime/os3_plan9.go
src/runtime/os_linux_386.go
src/runtime/os_linux_arm.go
src/runtime/parfor.go
src/runtime/proc.go
src/runtime/runtime1.go
src/runtime/runtime2.go
src/runtime/select.go
src/runtime/sema.go
src/runtime/signal1_unix.go
src/runtime/signal_386.go
src/runtime/signal_amd64x.go
src/runtime/signal_arm64.go
src/runtime/signal_linux_386.go
src/runtime/signal_linux_amd64.go
src/runtime/signal_linux_arm.go
src/runtime/signal_linux_arm64.go
src/runtime/signal_linux_ppc64x.go
src/runtime/signal_ppc64x.go
src/runtime/stack.go
src/runtime/stubs.go
src/runtime/symtab.go
src/runtime/sys_x86.go
src/runtime/trace.go
src/runtime/traceback.go
src/runtime/vdso_linux_amd64.go
src/runtime/zgoos_android.go [deleted file]
src/runtime/zgoos_darwin.go [deleted file]
src/runtime/zgoos_dragonfly.go [deleted file]
src/runtime/zgoos_freebsd.go [deleted file]
src/runtime/zgoos_linux.go [deleted file]
src/runtime/zgoos_nacl.go [deleted file]
src/runtime/zgoos_netbsd.go [deleted file]
src/runtime/zgoos_openbsd.go [deleted file]
src/runtime/zgoos_plan9.go [deleted file]
src/runtime/zgoos_solaris.go [deleted file]
src/runtime/zgoos_windows.go [deleted file]

index 6a3ce32ec8a62d7d98b065c13d1439bfd0bf3c05..de48481bf1412148e9c839eea1dd27cceb238358 100644 (file)
@@ -29,7 +29,7 @@ src/cmd/cgo/zdefaultcc.go
 src/cmd/go/zdefaultcc.go
 src/cmd/internal/obj/zbootstrap.go
 src/go/doc/headscan
-src/runtime/zversion.go
+src/runtime/internal/sys/zversion.go
 src/unicode/maketables
 src/*.*/
 test/pass.out
index 989d3596dac7924834d5efc0fcb0242707e9dcbd..35e06b9e7eefdeea5f467ad70965e954f23221dd 100644 (file)
@@ -31,7 +31,7 @@ import (
 
 // Do not instrument the following packages at all,
 // at best instrumentation would cause infinite recursion.
-var omit_pkgs = []string{"runtime/internal/atomic", "runtime", "runtime/race", "runtime/msan"}
+var omit_pkgs = []string{"runtime/internal/atomic", "runtime/internal/sys", "runtime", "runtime/race", "runtime/msan"}
 
 // Only insert racefuncenter/racefuncexit into the following packages.
 // Memory accesses in the packages are either uninteresting or will cause false positives.
index a30cd862b5ee1e24e952cef9b8022748737e0ad5..3fdefbc890797d127edfe8f032a52e01a8da6300 100644 (file)
@@ -463,7 +463,7 @@ var deptab = []struct {
        {"cmd/go", []string{
                "zdefaultcc.go",
        }},
-       {"runtime", []string{
+       {"runtime/internal/sys", []string{
                "zversion.go",
        }},
 }
index 1e7b4a7f625660748bd1fff0b1f5e926be616c31..c1a9b817f88a01267e4ea5891ade7215c033d59d 100644 (file)
@@ -17,23 +17,23 @@ import (
 // mkzversion writes zversion.go:
 //
 //     package runtime
-//     const defaultGoroot = <goroot>
-//     const theVersion = <version>
-//     const goexperiment = <goexperiment>
-//     const stackGuardMultiplier = <multiplier value>
-//     const buildVersion = <build version>
+//     const DefaultGoroot = <goroot>
+//     const TheVersion = <version>
+//     const Goexperiment = <goexperiment>
+//     const StackGuardMultiplier = <multiplier value>
+//     const BuildVersion = <build version>
 //
 func mkzversion(dir, file string) {
        out := fmt.Sprintf(
                "// auto generated by go tool dist\n"+
                        "\n"+
-                       "package runtime\n"+
+                       "package sys\n"+
                        "\n"+
-                       "const defaultGoroot = `%s`\n"+
-                       "const theVersion = `%s`\n"+
-                       "const goexperiment = `%s`\n"+
-                       "const stackGuardMultiplier = %d\n\n"+
-                       "var buildVersion = theVersion\n", goroot_final, findgoversion(), os.Getenv("GOEXPERIMENT"), stackGuardMultiplier())
+                       "const DefaultGoroot = `%s`\n"+
+                       "const TheVersion = `%s`\n"+
+                       "const Goexperiment = `%s`\n"+
+                       "const StackGuardMultiplier = %d\n\n"+
+                       "var BuildVersion = TheVersion\n", goroot_final, findgoversion(), os.Getenv("GOEXPERIMENT"), stackGuardMultiplier())
 
        writefile(out, file, writeSkipSame)
 }
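Given the new template above, the generated src/runtime/internal/sys/zversion.go would look roughly like the following; the GOROOT path and version string below are invented purely for illustration.

// auto generated by go tool dist

package sys

const DefaultGoroot = `/usr/local/go`
const TheVersion = `devel +0000000 Wed Nov 11 2015`
const Goexperiment = ``
const StackGuardMultiplier = 1

var BuildVersion = TheVersion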
index 3859977607ee7b230ba15ccedab2a6d48e6ed52a..4e20b5600be0845151c3ce606ec822229279bdac 100644 (file)
@@ -3,56 +3,57 @@
 package main
 
 var builddeps = map[string][]string{
-       "bufio":                             {"bytes", "errors", "io", "runtime", "runtime/internal/atomic", "sync", "sync/atomic", "unicode", "unicode/utf8"},
-       "bytes":                             {"errors", "io", "runtime", "runtime/internal/atomic", "sync", "sync/atomic", "unicode", "unicode/utf8"},
-       "container/heap":                    {"runtime", "runtime/internal/atomic", "sort"},
-       "crypto":                            {"errors", "hash", "io", "math", "runtime", "runtime/internal/atomic", "strconv", "sync", "sync/atomic", "unicode/utf8"},
-       "crypto/sha1":                       {"crypto", "errors", "hash", "io", "math", "runtime", "runtime/internal/atomic", "strconv", "sync", "sync/atomic", "unicode/utf8"},
-       "debug/dwarf":                       {"encoding/binary", "errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "path", "reflect", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
-       "debug/elf":                         {"bytes", "debug/dwarf", "encoding/binary", "errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "path", "reflect", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
-       "encoding":                          {"runtime", "runtime/internal/atomic"},
-       "encoding/base64":                   {"errors", "io", "math", "runtime", "runtime/internal/atomic", "strconv", "sync", "sync/atomic", "unicode/utf8"},
-       "encoding/binary":                   {"errors", "io", "math", "reflect", "runtime", "runtime/internal/atomic", "strconv", "sync", "sync/atomic", "unicode/utf8"},
-       "encoding/json":                     {"bytes", "encoding", "encoding/base64", "errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
-       "errors":                            {"runtime", "runtime/internal/atomic"},
-       "flag":                              {"errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "sort", "strconv", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"},
-       "fmt":                               {"errors", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "strconv", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"},
-       "go/ast":                            {"bytes", "errors", "fmt", "go/scanner", "go/token", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "path/filepath", "reflect", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
-       "go/build":                          {"bufio", "bytes", "errors", "fmt", "go/ast", "go/doc", "go/parser", "go/scanner", "go/token", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "io/ioutil", "log", "math", "net/url", "os", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"},
-       "go/doc":                            {"bytes", "errors", "fmt", "go/ast", "go/scanner", "go/token", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "io/ioutil", "math", "net/url", "os", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"},
-       "go/parser":                         {"bytes", "errors", "fmt", "go/ast", "go/scanner", "go/token", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "io/ioutil", "math", "os", "path/filepath", "reflect", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
-       "go/scanner":                        {"bytes", "errors", "fmt", "go/token", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "path/filepath", "reflect", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
-       "go/token":                          {"errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "sort", "strconv", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"},
-       "hash":                              {"errors", "io", "runtime", "runtime/internal/atomic", "sync", "sync/atomic"},
-       "internal/singleflight":             {"runtime", "runtime/internal/atomic", "sync", "sync/atomic"},
-       "internal/syscall/windows":          {"errors", "runtime", "runtime/internal/atomic", "sync", "sync/atomic", "syscall", "unicode/utf16"},
-       "internal/syscall/windows/registry": {"errors", "io", "runtime", "runtime/internal/atomic", "sync", "sync/atomic", "syscall", "unicode/utf16"},
-       "io":                      {"errors", "runtime", "runtime/internal/atomic", "sync", "sync/atomic"},
-       "io/ioutil":               {"bytes", "errors", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "path/filepath", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
-       "log":                     {"errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "strconv", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"},
-       "math":                    {"runtime", "runtime/internal/atomic"},
-       "net/url":                 {"bytes", "errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
-       "os":                      {"errors", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "runtime", "runtime/internal/atomic", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"},
-       "os/exec":                 {"bytes", "errors", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "path/filepath", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
-       "os/signal":               {"errors", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "os", "runtime", "runtime/internal/atomic", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"},
-       "path":                    {"errors", "io", "runtime", "runtime/internal/atomic", "strings", "sync", "sync/atomic", "unicode", "unicode/utf8"},
-       "path/filepath":           {"errors", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "os", "runtime", "runtime/internal/atomic", "sort", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
-       "reflect":                 {"errors", "math", "runtime", "runtime/internal/atomic", "strconv", "sync", "sync/atomic", "unicode/utf8"},
-       "regexp":                  {"bytes", "errors", "io", "math", "regexp/syntax", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "unicode", "unicode/utf8"},
-       "regexp/syntax":           {"bytes", "errors", "io", "math", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "unicode", "unicode/utf8"},
-       "runtime":                 {"runtime/internal/atomic"},
+       "bufio":                             {"bytes", "errors", "io", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic", "unicode", "unicode/utf8"},
+       "bytes":                             {"errors", "io", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic", "unicode", "unicode/utf8"},
+       "container/heap":                    {"runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort"},
+       "crypto":                            {"errors", "hash", "io", "math", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "strconv", "sync", "sync/atomic", "unicode/utf8"},
+       "crypto/sha1":                       {"crypto", "errors", "hash", "io", "math", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "strconv", "sync", "sync/atomic", "unicode/utf8"},
+       "debug/dwarf":                       {"encoding/binary", "errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "path", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+       "debug/elf":                         {"bytes", "debug/dwarf", "encoding/binary", "errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "path", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+       "encoding":                          {"runtime", "runtime/internal/atomic", "runtime/internal/sys"},
+       "encoding/base64":                   {"errors", "io", "math", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "strconv", "sync", "sync/atomic", "unicode/utf8"},
+       "encoding/binary":                   {"errors", "io", "math", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "strconv", "sync", "sync/atomic", "unicode/utf8"},
+       "encoding/json":                     {"bytes", "encoding", "encoding/base64", "errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+       "errors":                            {"runtime", "runtime/internal/atomic", "runtime/internal/sys"},
+       "flag":                              {"errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"},
+       "fmt":                               {"errors", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "strconv", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"},
+       "go/ast":                            {"bytes", "errors", "fmt", "go/scanner", "go/token", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "path/filepath", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+       "go/build":                          {"bufio", "bytes", "errors", "fmt", "go/ast", "go/doc", "go/parser", "go/scanner", "go/token", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "io/ioutil", "log", "math", "net/url", "os", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+       "go/doc":                            {"bytes", "errors", "fmt", "go/ast", "go/scanner", "go/token", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "io/ioutil", "math", "net/url", "os", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+       "go/parser":                         {"bytes", "errors", "fmt", "go/ast", "go/scanner", "go/token", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "io/ioutil", "math", "os", "path/filepath", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+       "go/scanner":                        {"bytes", "errors", "fmt", "go/token", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "path/filepath", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+       "go/token":                          {"errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"},
+       "hash":                              {"errors", "io", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic"},
+       "internal/singleflight":             {"runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic"},
+       "internal/syscall/windows":          {"errors", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic", "syscall", "unicode/utf16"},
+       "internal/syscall/windows/registry": {"errors", "io", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic", "syscall", "unicode/utf16"},
+       "io":                      {"errors", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic"},
+       "io/ioutil":               {"bytes", "errors", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "path/filepath", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+       "log":                     {"errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "strconv", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"},
+       "math":                    {"runtime", "runtime/internal/atomic", "runtime/internal/sys"},
+       "net/url":                 {"bytes", "errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+       "os":                      {"errors", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"},
+       "os/exec":                 {"bytes", "errors", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "path/filepath", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+       "os/signal":               {"errors", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "os", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"},
+       "path":                    {"errors", "io", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "strings", "sync", "sync/atomic", "unicode", "unicode/utf8"},
+       "path/filepath":           {"errors", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "os", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+       "reflect":                 {"errors", "math", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "strconv", "sync", "sync/atomic", "unicode/utf8"},
+       "regexp":                  {"bytes", "errors", "io", "math", "regexp/syntax", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "unicode", "unicode/utf8"},
+       "regexp/syntax":           {"bytes", "errors", "io", "math", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "unicode", "unicode/utf8"},
+       "runtime":                 {"runtime/internal/atomic", "runtime/internal/sys"},
        "runtime/internal/atomic": {},
-       "sort":                {"runtime", "runtime/internal/atomic"},
-       "strconv":             {"errors", "math", "runtime", "runtime/internal/atomic", "unicode/utf8"},
-       "strings":             {"errors", "io", "runtime", "runtime/internal/atomic", "sync", "sync/atomic", "unicode", "unicode/utf8"},
-       "sync":                {"runtime", "runtime/internal/atomic", "sync/atomic"},
-       "sync/atomic":         {"runtime", "runtime/internal/atomic"},
-       "syscall":             {"errors", "runtime", "runtime/internal/atomic", "sync", "sync/atomic", "unicode/utf16"},
-       "text/template":       {"bytes", "errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "io/ioutil", "math", "net/url", "os", "path/filepath", "reflect", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"},
-       "text/template/parse": {"bytes", "errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
-       "time":                {"errors", "internal/syscall/windows/registry", "io", "runtime", "runtime/internal/atomic", "sync", "sync/atomic", "syscall", "unicode/utf16"},
-       "unicode":             {"runtime", "runtime/internal/atomic"},
-       "unicode/utf16":       {"runtime", "runtime/internal/atomic"},
-       "unicode/utf8":        {"runtime", "runtime/internal/atomic"},
-       "cmd/go":              {"bufio", "bytes", "container/heap", "crypto", "crypto/sha1", "debug/dwarf", "debug/elf", "encoding", "encoding/base64", "encoding/binary", "encoding/json", "errors", "flag", "fmt", "go/ast", "go/build", "go/doc", "go/parser", "go/scanner", "go/token", "hash", "internal/singleflight", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "io/ioutil", "log", "math", "net/url", "os", "os/exec", "os/signal", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+       "runtime/internal/sys":    {},
+       "sort":                    {"runtime", "runtime/internal/atomic", "runtime/internal/sys"},
+       "strconv":                 {"errors", "math", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "unicode/utf8"},
+       "strings":                 {"errors", "io", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic", "unicode", "unicode/utf8"},
+       "sync":                    {"runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync/atomic"},
+       "sync/atomic":             {"runtime", "runtime/internal/atomic", "runtime/internal/sys"},
+       "syscall":                 {"errors", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic", "unicode/utf16"},
+       "text/template":           {"bytes", "errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "io/ioutil", "math", "net/url", "os", "path/filepath", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+       "text/template/parse":     {"bytes", "errors", "fmt", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"},
+       "time":                    {"errors", "internal/syscall/windows/registry", "io", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic", "syscall", "unicode/utf16"},
+       "unicode":                 {"runtime", "runtime/internal/atomic", "runtime/internal/sys"},
+       "unicode/utf16":           {"runtime", "runtime/internal/atomic", "runtime/internal/sys"},
+       "unicode/utf8":            {"runtime", "runtime/internal/atomic", "runtime/internal/sys"},
+       "cmd/go":                  {"bufio", "bytes", "container/heap", "crypto", "crypto/sha1", "debug/dwarf", "debug/elf", "encoding", "encoding/base64", "encoding/binary", "encoding/json", "errors", "flag", "fmt", "go/ast", "go/build", "go/doc", "go/parser", "go/scanner", "go/token", "hash", "internal/singleflight", "internal/syscall/windows", "internal/syscall/windows/registry", "io", "io/ioutil", "log", "math", "net/url", "os", "os/exec", "os/signal", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"},
 }
index c3aba7c584a20097ba81c7859724360c993cd784..4575b4426013800a9d137e6326738e2cee0dfbeb 100644 (file)
@@ -36,7 +36,8 @@ var pkgDeps = map[string][]string{
        // L0 is the lowest level, core, nearly unavoidable packages.
        "errors":                  {},
        "io":                      {"errors", "sync"},
-       "runtime":                 {"unsafe", "runtime/internal/atomic"},
+       "runtime":                 {"unsafe", "runtime/internal/atomic", "runtime/internal/sys"},
+       "runtime/internal/sys":    {},
        "runtime/internal/atomic": {"unsafe"},
        "sync":        {"runtime", "sync/atomic", "unsafe"},
        "sync/atomic": {"unsafe"},
index 95173495c3add321ebf814dce08179cf4f39d841..9ea0eb0187b9ad21bc963c3e20c0156e88fde96f 100644 (file)
@@ -4,11 +4,14 @@
 
 package runtime
 
-import "unsafe"
+import (
+       "runtime/internal/sys"
+       "unsafe"
+)
 
 const (
-       c0 = uintptr((8-ptrSize)/4*2860486313 + (ptrSize-4)/4*33054211828000289)
-       c1 = uintptr((8-ptrSize)/4*3267000013 + (ptrSize-4)/4*23344194077549503)
+       c0 = uintptr((8-sys.PtrSize)/4*2860486313 + (sys.PtrSize-4)/4*33054211828000289)
+       c1 = uintptr((8-sys.PtrSize)/4*3267000013 + (sys.PtrSize-4)/4*23344194077549503)
 )
 
 // type algorithms - known to compiler
@@ -301,7 +304,7 @@ func memclrBytes(b []byte) {
        memclr(s.array, uintptr(s.len))
 }
 
-const hashRandomBytes = ptrSize / 4 * 64
+const hashRandomBytes = sys.PtrSize / 4 * 64
 
 // used in asm_{386,amd64}.s to seed the hash function
 var aeskeysched [hashRandomBytes]byte
@@ -324,7 +327,7 @@ func init() {
                getRandomData(aeskeysched[:])
                return
        }
-       getRandomData((*[len(hashkey) * ptrSize]byte)(unsafe.Pointer(&hashkey))[:])
+       getRandomData((*[len(hashkey) * sys.PtrSize]byte)(unsafe.Pointer(&hashkey))[:])
        hashkey[0] |= 1 // make sure these numbers are odd
        hashkey[1] |= 1
        hashkey[2] |= 1
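The c0/c1 definitions above use integer arithmetic on sys.PtrSize to pick a 32-bit or a 64-bit multiplier at compile time. A minimal worked sketch, assuming a 64-bit target (sys.PtrSize == 8):

package main

import "fmt"

// (8-8)/4 == 0 kills the 32-bit term, (8-4)/4 == 1 keeps the 64-bit one.
// On a 32-bit target the factors flip and c0 becomes 2860486313 instead.
const c0on64 = uintptr((8-8)/4*2860486313 + (8-4)/4*33054211828000289)

func main() {
	fmt.Println(c0on64 == 33054211828000289) // true
}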
diff --git a/src/runtime/arch_386.go b/src/runtime/arch_386.go
deleted file mode 100644 (file)
index 75e94ec..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-const (
-       thechar        = '8'
-       _BigEndian     = 0
-       _CacheLineSize = 64
-       _PhysPageSize  = goos_nacl*65536 + (1-goos_nacl)*4096 // 4k normally; 64k on NaCl
-       _PCQuantum     = 1
-       _Int64Align    = 4
-       hugePageSize   = 1 << 21
-       minFrameSize   = 0
-)
-
-type uintreg uint32
-type intptr int32 // TODO(rsc): remove
diff --git a/src/runtime/arch_amd64.go b/src/runtime/arch_amd64.go
deleted file mode 100644 (file)
index d7721f7..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-const (
-       thechar        = '6'
-       _BigEndian     = 0
-       _CacheLineSize = 64
-       _PhysPageSize  = 4096
-       _PCQuantum     = 1
-       _Int64Align    = 8
-       hugePageSize   = 1 << 21
-       minFrameSize   = 0
-)
-
-type uintreg uint64
-type intptr int64 // TODO(rsc): remove
diff --git a/src/runtime/arch_amd64p32.go b/src/runtime/arch_amd64p32.go
deleted file mode 100644 (file)
index aa8343a..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-const (
-       thechar        = '6'
-       _BigEndian     = 0
-       _CacheLineSize = 64
-       _PhysPageSize  = 65536*goos_nacl + 4096*(1-goos_nacl)
-       _PCQuantum     = 1
-       _Int64Align    = 8
-       hugePageSize   = 1 << 21
-       minFrameSize   = 0
-)
-
-type uintreg uint64
-type intptr int32 // TODO(rsc): remove
diff --git a/src/runtime/arch_arm.go b/src/runtime/arch_arm.go
deleted file mode 100644 (file)
index aa3e180..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-const (
-       thechar        = '5'
-       _BigEndian     = 0
-       _CacheLineSize = 32
-       _PhysPageSize  = 65536*goos_nacl + 4096*(1-goos_nacl)
-       _PCQuantum     = 4
-       _Int64Align    = 4
-       hugePageSize   = 0
-       minFrameSize   = 4
-)
-
-type uintreg uint32
-type intptr int32 // TODO(rsc): remove
diff --git a/src/runtime/arch_arm64.go b/src/runtime/arch_arm64.go
deleted file mode 100644 (file)
index f01c26d..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-const (
-       thechar        = '7'
-       _BigEndian     = 0
-       _CacheLineSize = 32
-       _PhysPageSize  = 65536
-       _PCQuantum     = 4
-       _Int64Align    = 8
-       hugePageSize   = 0
-       minFrameSize   = 8
-)
-
-type uintreg uint64
-type intptr int64 // TODO(rsc): remove
diff --git a/src/runtime/arch_ppc64.go b/src/runtime/arch_ppc64.go
deleted file mode 100644 (file)
index 273cc56..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-const (
-       thechar        = '9'
-       _BigEndian     = 1
-       _CacheLineSize = 64
-       _PhysPageSize  = 65536
-       _PCQuantum     = 4
-       _Int64Align    = 8
-       hugePageSize   = 0
-       minFrameSize   = 8
-)
-
-type uintreg uint64
-type intptr int64 // TODO(rsc): remove
diff --git a/src/runtime/arch_ppc64le.go b/src/runtime/arch_ppc64le.go
deleted file mode 100644 (file)
index e4eb9e5..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-const (
-       thechar        = '9'
-       _BigEndian     = 0
-       _CacheLineSize = 64
-       _PhysPageSize  = 65536
-       _PCQuantum     = 4
-       _Int64Align    = 8
-       hugePageSize   = 0
-       minFrameSize   = 8
-)
-
-type uintreg uint64
-type intptr int64 // TODO(rsc): remove
index a01548a32f11b4e9dc202de4cbb772e88d97be4c..08be1427850308f2e63b4407a9c55cbca97535ab 100644 (file)
 
 package runtime
 
-import "unsafe"
+import (
+       "runtime/internal/sys"
+       "unsafe"
+)
 
 // Call from Go to C.
 //go:nosplit
@@ -220,22 +223,22 @@ func cgocallbackg1() {
        case "arm":
                // On arm, stack frame is two words and there's a saved LR between
                // SP and the stack frame and between the stack frame and the arguments.
-               cb = (*args)(unsafe.Pointer(sp + 4*ptrSize))
+               cb = (*args)(unsafe.Pointer(sp + 4*sys.PtrSize))
        case "arm64":
                // On arm64, stack frame is four words and there's a saved LR between
                // SP and the stack frame and between the stack frame and the arguments.
-               cb = (*args)(unsafe.Pointer(sp + 5*ptrSize))
+               cb = (*args)(unsafe.Pointer(sp + 5*sys.PtrSize))
        case "amd64":
                // On amd64, stack frame is one word, plus caller PC.
                if framepointer_enabled {
                        // In this case, there's also saved BP.
-                       cb = (*args)(unsafe.Pointer(sp + 3*ptrSize))
+                       cb = (*args)(unsafe.Pointer(sp + 3*sys.PtrSize))
                        break
                }
-               cb = (*args)(unsafe.Pointer(sp + 2*ptrSize))
+               cb = (*args)(unsafe.Pointer(sp + 2*sys.PtrSize))
        case "386":
                // On 386, stack frame is three words, plus caller PC.
-               cb = (*args)(unsafe.Pointer(sp + 4*ptrSize))
+               cb = (*args)(unsafe.Pointer(sp + 4*sys.PtrSize))
        case "ppc64", "ppc64le":
                // On ppc64, the callback arguments are in the arguments area of
                // cgocallback's stack frame. The stack looks like this:
@@ -252,7 +255,7 @@ func cgocallbackg1() {
                // | cgocallback_gofunc +------------------------------+ <- sp + minFrameSize
                // |                    | fixed frame area             |
                // +--------------------+------------------------------+ <- sp
-               cb = (*args)(unsafe.Pointer(sp + 2*minFrameSize + 2*ptrSize))
+               cb = (*args)(unsafe.Pointer(sp + 2*sys.MinFrameSize + 2*sys.PtrSize))
        }
 
        // Invoke callback.
@@ -291,7 +294,7 @@ func unwindm(restore *bool) {
        default:
                throw("unwindm not implemented")
        case "386", "amd64", "arm", "ppc64", "ppc64le":
-               sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + minFrameSize))
+               sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + sys.MinFrameSize))
        case "arm64":
                sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + 16))
        }
@@ -437,7 +440,7 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool) {
                if inheap(uintptr(unsafe.Pointer(it))) {
                        panic(errorString(cgoCheckPointerFail))
                }
-               p = *(*unsafe.Pointer)(unsafe.Pointer(uintptr(p) + ptrSize))
+               p = *(*unsafe.Pointer)(unsafe.Pointer(uintptr(p) + sys.PtrSize))
                if !cgoIsGoPointer(p) {
                        return
                }
@@ -505,9 +508,9 @@ func cgoCheckUnknownPointer(p unsafe.Pointer) {
                        return
                }
                n := span.elemsize
-               for i := uintptr(0); i < n; i += ptrSize {
+               for i := uintptr(0); i < n; i += sys.PtrSize {
                        bits := hbits.bits()
-                       if i >= 2*ptrSize && bits&bitMarked == 0 {
+                       if i >= 2*sys.PtrSize && bits&bitMarked == 0 {
                                // No more possible pointers.
                                break
                        }
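As a worked instance of the ppc64 case above: the deleted arch_ppc64.go/arch_ppc64le.go files in this CL set minFrameSize = 8, and the pointer size on ppc64 is 8, so sp + 2*sys.MinFrameSize + 2*sys.PtrSize resolves to sp + 32, i.e. the callback arguments begin 32 bytes above the stack pointer.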
index ad2bf1c628785ececd6a7c308a9b88f7568cbf32..6a4eae607a499102241b34b9dfac98bc4e46f3be 100644 (file)
@@ -8,6 +8,7 @@ package runtime
 
 import (
        "runtime/internal/atomic"
+       "runtime/internal/sys"
        "unsafe"
 )
 
@@ -115,7 +116,7 @@ func GostringW(w []uint16) (s string) {
 var Gostringnocopy = gostringnocopy
 var Maxstring = &maxstring
 
-type Uintreg uintreg
+type Uintreg sys.Uintreg
 
 var Open = open
 var Close = closefd
@@ -125,7 +126,7 @@ var Write = write
 func Envs() []string     { return envs }
 func SetEnvs(e []string) { envs = e }
 
-var BigEndian = _BigEndian
+var BigEndian = sys.BigEndian
 
 // For benchmarking.
 
@@ -156,7 +157,7 @@ func BenchSetType(n int, x interface{}) {
        })
 }
 
-const PtrSize = ptrSize
+const PtrSize = sys.PtrSize
 
 var TestingAssertE2I2GC = &testingAssertE2I2GC
 var TestingAssertE2T2GC = &testingAssertE2T2GC
index 5a5d432f6244f2aefc5ab93cd96bf968df4bae38..564318c7cdcd9295edb73920246eaa162e716393 100644 (file)
@@ -130,6 +130,8 @@ of the run-time system.
 */
 package runtime
 
+import "runtime/internal/sys"
+
 // Caller reports file and line number information about function invocations on
 // the calling goroutine's stack.  The argument skip is the number of stack frames
 // to ascend, with 0 identifying the caller of Caller.  (For historical reasons the
@@ -199,20 +201,20 @@ func GOROOT() string {
        if s != "" {
                return s
        }
-       return defaultGoroot
+       return sys.DefaultGoroot
 }
 
 // Version returns the Go tree's version string.
 // It is either the commit hash and date at the time of the build or,
 // when possible, a release tag like "go1.3".
 func Version() string {
-       return theVersion
+       return sys.TheVersion
 }
 
 // GOOS is the running program's operating system target:
 // one of darwin, freebsd, linux, and so on.
-const GOOS string = theGoos
+const GOOS string = sys.TheGoos
 
 // GOARCH is the running program's architecture target:
 // 386, amd64, or arm.
-const GOARCH string = theGoarch
+const GOARCH string = sys.TheGoarch
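The exported runtime API is unchanged by this refactor; code like the sketch below keeps working, the values are simply re-exported from runtime/internal/sys now.

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// Same public API as before this CL; the values behind it now come
	// from constants defined in runtime/internal/sys.
	fmt.Println(runtime.GOOS, runtime.GOARCH)
	fmt.Println(runtime.Version(), runtime.GOROOT())
}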
index 667367891c9bf72164c66cd5c4b85f3fa5fcd974..056396c518b466c5905dc4b967d970964db19333 100644 (file)
@@ -55,6 +55,7 @@ package runtime
 
 import (
        "runtime/internal/atomic"
+       "runtime/internal/sys"
        "unsafe"
 )
 
@@ -96,7 +97,7 @@ const (
        oldIterator = 2 // there may be an iterator using oldbuckets
 
        // sentinel bucket ID for iterator checks
-       noCheck = 1<<(8*ptrSize) - 1
+       noCheck = 1<<(8*sys.PtrSize) - 1
 )
 
 // A header for a Go map.
@@ -160,7 +161,7 @@ func evacuated(b *bmap) bool {
 }
 
 func (b *bmap) overflow(t *maptype) *bmap {
-       return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-ptrSize))
+       return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize))
 }
 
 func (h *hmap) setoverflow(t *maptype, b, ovf *bmap) {
@@ -168,7 +169,7 @@ func (h *hmap) setoverflow(t *maptype, b, ovf *bmap) {
                h.createOverflow()
                *h.overflow[0] = append(*h.overflow[0], ovf)
        }
-       *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-ptrSize)) = ovf
+       *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize)) = ovf
 }
 
 func (h *hmap) createOverflow() {
@@ -201,11 +202,11 @@ func makemap(t *maptype, hint int64, h *hmap, bucket unsafe.Pointer) *hmap {
        }
 
        // check compiler's and reflect's math
-       if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(ptrSize)) ||
+       if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(sys.PtrSize)) ||
                t.key.size <= maxKeySize && (t.indirectkey || t.keysize != uint8(t.key.size)) {
                throw("key size wrong")
        }
-       if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(ptrSize)) ||
+       if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(sys.PtrSize)) ||
                t.elem.size <= maxValueSize && (t.indirectvalue || t.valuesize != uint8(t.elem.size)) {
                throw("value size wrong")
        }
@@ -293,7 +294,7 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
                        b = oldb
                }
        }
-       top := uint8(hash >> (ptrSize*8 - 8))
+       top := uint8(hash >> (sys.PtrSize*8 - 8))
        if top < minTopHash {
                top += minTopHash
        }
@@ -344,7 +345,7 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool)
                        b = oldb
                }
        }
-       top := uint8(hash >> (ptrSize*8 - 8))
+       top := uint8(hash >> (sys.PtrSize*8 - 8))
        if top < minTopHash {
                top += minTopHash
        }
@@ -387,7 +388,7 @@ func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe
                        b = oldb
                }
        }
-       top := uint8(hash >> (ptrSize*8 - 8))
+       top := uint8(hash >> (sys.PtrSize*8 - 8))
        if top < minTopHash {
                top += minTopHash
        }
@@ -444,7 +445,7 @@ again:
                growWork(t, h, bucket)
        }
        b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
-       top := uint8(hash >> (ptrSize*8 - 8))
+       top := uint8(hash >> (sys.PtrSize*8 - 8))
        if top < minTopHash {
                top += minTopHash
        }
@@ -541,7 +542,7 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
                growWork(t, h, bucket)
        }
        b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
-       top := uint8(hash >> (ptrSize*8 - 8))
+       top := uint8(hash >> (sys.PtrSize*8 - 8))
        if top < minTopHash {
                top += minTopHash
        }
@@ -594,7 +595,7 @@ func mapiterinit(t *maptype, h *hmap, it *hiter) {
                return
        }
 
-       if unsafe.Sizeof(hiter{})/ptrSize != 12 {
+       if unsafe.Sizeof(hiter{})/sys.PtrSize != 12 {
                throw("hash_iter size incorrect") // see ../../cmd/internal/gc/reflect.go
        }
        it.t = t
@@ -865,7 +866,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
                                                } else {
                                                        hash &^= newbit
                                                }
-                                               top = uint8(hash >> (ptrSize*8 - 8))
+                                               top = uint8(hash >> (sys.PtrSize*8 - 8))
                                                if top < minTopHash {
                                                        top += minTopHash
                                                }
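The expression uint8(hash >> (sys.PtrSize*8 - 8)), which recurs throughout this file, keeps only the top byte of the hash. A standalone illustration, assuming a 64-bit target (sys.PtrSize == 8):

package main

import "fmt"

func main() {
	const ptrSize = 8 // stands in for sys.PtrSize on a 64-bit target
	hash := uintptr(0xDEADBEEFCAFEBABE)
	top := uint8(hash >> (ptrSize*8 - 8)) // top byte of the 64-bit hash
	fmt.Printf("%#x\n", top)              // prints 0xde
}

The top += minTopHash bump seen in the real code is omitted here; it only keeps small values free for the map's internal markers.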
index 9f310f8bf366b24161135d16388732ae0aa447e1..454256381fb75fbb6489bb51882115d906aced7a 100644 (file)
@@ -6,6 +6,7 @@ package runtime
 
 import (
        "runtime/internal/atomic"
+       "runtime/internal/sys"
        "unsafe"
 )
 
@@ -196,12 +197,12 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
                                if x == empty {
                                        continue
                                }
-                               k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
+                               k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
                                if k.len != key.len {
                                        continue
                                }
                                if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) {
-                                       return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize))
+                                       return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
                                }
                        }
                        return atomic.Loadp(unsafe.Pointer(&zeroptr))
@@ -213,12 +214,12 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
                        if x == empty {
                                continue
                        }
-                       k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
+                       k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
                        if k.len != key.len {
                                continue
                        }
                        if k.str == key.str {
-                               return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize))
+                               return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
                        }
                        // check first 4 bytes
                        // TODO: on amd64/386 at least, make this compile to one 4-byte comparison instead of
@@ -237,9 +238,9 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
                        keymaybe = i
                }
                if keymaybe != bucketCnt {
-                       k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*ptrSize))
+                       k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
                        if memeq(k.str, key.str, uintptr(key.len)) {
-                               return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+keymaybe*uintptr(t.valuesize))
+                               return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize))
                        }
                }
                return atomic.Loadp(unsafe.Pointer(&zeroptr))
@@ -254,7 +255,7 @@ dohash:
                        b = oldb
                }
        }
-       top := uint8(hash >> (ptrSize*8 - 8))
+       top := uint8(hash >> (sys.PtrSize*8 - 8))
        if top < minTopHash {
                top += minTopHash
        }
@@ -264,12 +265,12 @@ dohash:
                        if x != top {
                                continue
                        }
-                       k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
+                       k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
                        if k.len != key.len {
                                continue
                        }
                        if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) {
-                               return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize))
+                               return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
                        }
                }
                b = b.overflow(t)
@@ -298,12 +299,12 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
                                if x == empty {
                                        continue
                                }
-                               k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
+                               k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
                                if k.len != key.len {
                                        continue
                                }
                                if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) {
-                                       return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true
+                                       return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
                                }
                        }
                        return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
@@ -315,12 +316,12 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
                        if x == empty {
                                continue
                        }
-                       k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
+                       k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
                        if k.len != key.len {
                                continue
                        }
                        if k.str == key.str {
-                               return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true
+                               return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
                        }
                        // check first 4 bytes
                        if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
@@ -337,9 +338,9 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
                        keymaybe = i
                }
                if keymaybe != bucketCnt {
-                       k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*ptrSize))
+                       k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
                        if memeq(k.str, key.str, uintptr(key.len)) {
-                               return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+keymaybe*uintptr(t.valuesize)), true
+                               return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize)), true
                        }
                }
                return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
@@ -354,7 +355,7 @@ dohash:
                        b = oldb
                }
        }
-       top := uint8(hash >> (ptrSize*8 - 8))
+       top := uint8(hash >> (sys.PtrSize*8 - 8))
        if top < minTopHash {
                top += minTopHash
        }
@@ -364,12 +365,12 @@ dohash:
                        if x != top {
                                continue
                        }
-                       k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
+                       k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
                        if k.len != key.len {
                                continue
                        }
                        if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) {
-                               return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true
+                               return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
                        }
                }
                b = b.overflow(t)
index 0a62c6731dfc078863480d15f5e23cfc542c726f..dfceba337659533ca3ba868c9c16a9cdab487018 100644 (file)
 
 package runtime
 
-import "unsafe"
+import (
+       "runtime/internal/sys"
+       "unsafe"
+)
 
 //go:linkname runtime_debug_WriteHeapDump runtime/debug.WriteHeapDump
 func runtime_debug_WriteHeapDump(fd uintptr) {
@@ -233,7 +236,7 @@ func dumpbv(cbv *bitvector, offset uintptr) {
        for i := uintptr(0); i < uintptr(bv.n); i++ {
                if bv.bytedata[i/8]>>(i%8)&1 == 1 {
                        dumpint(fieldKindPtr)
-                       dumpint(uint64(offset + i*ptrSize))
+                       dumpint(uint64(offset + i*sys.PtrSize))
                }
        }
 }
@@ -263,7 +266,7 @@ func dumpframe(s *stkframe, arg unsafe.Pointer) bool {
        var bv bitvector
        if stkmap != nil && stkmap.n > 0 {
                bv = stackmapdata(stkmap, pcdata)
-               dumpbvtypes(&bv, unsafe.Pointer(s.varp-uintptr(bv.n*ptrSize)))
+               dumpbvtypes(&bv, unsafe.Pointer(s.varp-uintptr(bv.n*sys.PtrSize)))
        } else {
                bv.n = -1
        }
@@ -288,7 +291,7 @@ func dumpframe(s *stkframe, arg unsafe.Pointer) bool {
                dumpbv(&child.args, child.argoff)
        } else {
                // conservative - everything might be a pointer
-               for off := child.argoff; off < child.argoff+child.arglen; off += ptrSize {
+               for off := child.argoff; off < child.argoff+child.arglen; off += sys.PtrSize {
                        dumpint(fieldKindPtr)
                        dumpint(uint64(off))
                }
@@ -297,21 +300,21 @@ func dumpframe(s *stkframe, arg unsafe.Pointer) bool {
        // Dump fields in the local vars section
        if stkmap == nil {
                // No locals information, dump everything.
-               for off := child.arglen; off < s.varp-s.sp; off += ptrSize {
+               for off := child.arglen; off < s.varp-s.sp; off += sys.PtrSize {
                        dumpint(fieldKindPtr)
                        dumpint(uint64(off))
                }
        } else if stkmap.n < 0 {
                // Locals size information, dump just the locals.
                size := uintptr(-stkmap.n)
-               for off := s.varp - size - s.sp; off < s.varp-s.sp; off += ptrSize {
+               for off := s.varp - size - s.sp; off < s.varp-s.sp; off += sys.PtrSize {
                        dumpint(fieldKindPtr)
                        dumpint(uint64(off))
                }
        } else if stkmap.n > 0 {
                // Locals bitmap information, scan just the pointers in
                // locals.
-               dumpbv(&bv, s.varp-uintptr(bv.n)*ptrSize-s.sp)
+               dumpbv(&bv, s.varp-uintptr(bv.n)*sys.PtrSize-s.sp)
        }
        dumpint(fieldKindEol)
 
@@ -489,11 +492,11 @@ func dumpparams() {
        } else {
                dumpbool(true) // big-endian ptrs
        }
-       dumpint(ptrSize)
+       dumpint(sys.PtrSize)
        dumpint(uint64(mheap_.arena_start))
        dumpint(uint64(mheap_.arena_used))
-       dumpint(thechar)
-       dumpstr(goexperiment)
+       dumpint(sys.TheChar)
+       dumpstr(sys.Goexperiment)
        dumpint(uint64(ncpu))
 }
 
@@ -704,7 +707,7 @@ func dumpbvtypes(bv *bitvector, base unsafe.Pointer) {
 
 func makeheapobjbv(p uintptr, size uintptr) bitvector {
        // Extend the temp buffer if necessary.
-       nptr := size / ptrSize
+       nptr := size / sys.PtrSize
        if uintptr(len(tmpbuf)) < nptr/8+1 {
                if tmpbuf != nil {
                        sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
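
For context on the dumpbv change above: bit i of the packed bitvector says whether word i of the region is a pointer, and each set bit is reported to the heap dump as a byte offset, which is where the i*sys.PtrSize factor comes from. A small stand-alone sketch (the bitvector contents here are invented):

package main

import "fmt"

const PtrSize = 4 << (^uintptr(0) >> 63)

// pointerOffsets returns the byte offsets of the words marked as pointers
// in a little-endian bit-packed bitvector of n entries.
func pointerOffsets(bytedata []byte, n uintptr, base uintptr) []uintptr {
	var offs []uintptr
	for i := uintptr(0); i < n; i++ {
		if bytedata[i/8]>>(i%8)&1 == 1 {
			offs = append(offs, base+i*PtrSize)
		}
	}
	return offs
}

func main() {
	// Words 0, 3 and 9 are pointers: bits 0 and 3 of byte 0, bit 1 of byte 1.
	bv := []byte{0b0000_1001, 0b0000_0010}
	fmt.Println(pointerOffsets(bv, 16, 0)) // [0 24 72] on 64-bit
}
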
diff --git a/src/runtime/iface.go b/src/runtime/iface.go
index d4e8b8e69f1f9d5300ef4839fbdd51a6dc5f3afc..71dc865e0712cb0f2e2c54c573d5f3ed14d599c7 100644 (file)
@@ -6,6 +6,7 @@ package runtime
 
 import (
        "runtime/internal/atomic"
+       "runtime/internal/sys"
        "unsafe"
 )
 
@@ -69,7 +70,7 @@ func getitab(inter *interfacetype, typ *_type, canfail bool) *itab {
                }
        }
 
-       m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(inter.mhdr)-1)*ptrSize, 0, &memstats.other_sys))
+       m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(inter.mhdr)-1)*sys.PtrSize, 0, &memstats.other_sys))
        m.inter = inter
        m._type = typ
 
@@ -90,7 +91,7 @@ search:
                        t := &x.mhdr[j]
                        if t.mtyp == itype && (t.name == iname || *t.name == *iname) && t.pkgpath == ipkgpath {
                                if m != nil {
-                                       *(*unsafe.Pointer)(add(unsafe.Pointer(&m.fun[0]), uintptr(k)*ptrSize)) = t.ifn
+                                       *(*unsafe.Pointer)(add(unsafe.Pointer(&m.fun[0]), uintptr(k)*sys.PtrSize)) = t.ifn
                                }
                                goto nextimethod
                        }
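
The getitab hunks above show why sys.PtrSize appears in both the allocation size and the slot indexing: itab is declared with fun [1]uintptr, extra method slots are allocated immediately after the struct, and slot k is reached by adding k*PtrSize to &fun[0]. A rough stand-alone sketch of that idiom (a byte slice stands in for persistentalloc, and the struct is stripped down to the one field that matters):

package main

import (
	"fmt"
	"unsafe"
)

const PtrSize = 4 << (^uintptr(0) >> 63)

// itab here is a stand-in: the real one has inter, _type and other fields
// before fun, but the indexing idiom is the same.
type itab struct {
	fun [1]uintptr // declared with one slot; more follow in the allocation
}

func main() {
	nmethods := 3
	buf := make([]byte, int(unsafe.Sizeof(itab{}))+(nmethods-1)*PtrSize)
	m := (*itab)(unsafe.Pointer(&buf[0]))

	for k := 0; k < nmethods; k++ {
		// Equivalent of add(unsafe.Pointer(&m.fun[0]), uintptr(k)*sys.PtrSize).
		slot := (*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&m.fun[0])) + uintptr(k)*PtrSize))
		*slot = uintptr(0x1000 + k) // stand-in for a method's code pointer
	}
	for k := 0; k < nmethods; k++ {
		slot := (*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&m.fun[0])) + uintptr(k)*PtrSize))
		fmt.Printf("fun[%d] = %#x\n", k, *slot)
	}
}
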
diff --git a/src/runtime/internal/sys/arch_386.go b/src/runtime/internal/sys/arch_386.go
new file mode 100644 (file)
index 0000000..15c8e84
--- /dev/null
@@ -0,0 +1,19 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sys
+
+const (
+       TheChar       = '8'
+       BigEndian     = 0
+       CacheLineSize = 64
+       PhysPageSize  = GoosNacl*65536 + (1-GoosNacl)*4096 // 4k normally; 64k on NaCl
+       PCQuantum     = 1
+       Int64Align    = 4
+       HugePageSize  = 1 << 21
+       MinFrameSize  = 0
+)
+
+type Uintreg uint32
+type Intptr int32 // TODO(rsc): remove
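
The PhysPageSize expression above is the package's standard trick for per-configuration constants: every Goos*/Goarch* constant is 0 or 1, so multiplying by them selects a value at compile time with no build tags. A minimal sketch of the arithmetic (GoosNacl is passed in as a parameter here only so both outcomes can be printed; in the real package it is a compile-time constant from the generated zgoos file):

package main

import "fmt"

// physPage mirrors the PhysPageSize formula above for a given GoosNacl value.
func physPage(goosNacl int) int {
	return goosNacl*65536 + (1-goosNacl)*4096
}

func main() {
	fmt.Println(physPage(0)) // 4096  (every OS except NaCl)
	fmt.Println(physPage(1)) // 65536 (GOOS=nacl)
}
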
diff --git a/src/runtime/internal/sys/arch_amd64.go b/src/runtime/internal/sys/arch_amd64.go
new file mode 100644 (file)
index 0000000..bc9002c
--- /dev/null
@@ -0,0 +1,19 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sys
+
+const (
+       TheChar       = '6'
+       BigEndian     = 0
+       CacheLineSize = 64
+       PhysPageSize  = 4096
+       PCQuantum     = 1
+       Int64Align    = 8
+       HugePageSize  = 1 << 21
+       MinFrameSize  = 0
+)
+
+type Uintreg uint64
+type Intptr int64 // TODO(rsc): remove
diff --git a/src/runtime/internal/sys/arch_amd64p32.go b/src/runtime/internal/sys/arch_amd64p32.go
new file mode 100644 (file)
index 0000000..d7c185f
--- /dev/null
@@ -0,0 +1,19 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sys
+
+const (
+       TheChar       = '6'
+       BigEndian     = 0
+       CacheLineSize = 64
+       PhysPageSize  = 65536*GoosNacl + 4096*(1-GoosNacl)
+       PCQuantum     = 1
+       Int64Align    = 8
+       HugePageSize  = 1 << 21
+       MinFrameSize  = 0
+)
+
+type Uintreg uint64
+type Intptr int32 // TODO(rsc): remove
diff --git a/src/runtime/internal/sys/arch_arm.go b/src/runtime/internal/sys/arch_arm.go
new file mode 100644 (file)
index 0000000..d395ac5
--- /dev/null
@@ -0,0 +1,19 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sys
+
+const (
+       TheChar       = '5'
+       BigEndian     = 0
+       CacheLineSize = 32
+       PhysPageSize  = 65536*GoosNacl + 4096*(1-GoosNacl)
+       PCQuantum     = 4
+       Int64Align    = 4
+       HugePageSize  = 0
+       MinFrameSize  = 4
+)
+
+type Uintreg uint32
+type Intptr int32 // TODO(rsc): remove
diff --git a/src/runtime/internal/sys/arch_arm64.go b/src/runtime/internal/sys/arch_arm64.go
new file mode 100644 (file)
index 0000000..bd7e41d
--- /dev/null
@@ -0,0 +1,19 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sys
+
+const (
+       TheChar       = '7'
+       BigEndian     = 0
+       CacheLineSize = 32
+       PhysPageSize  = 65536
+       PCQuantum     = 4
+       Int64Align    = 8
+       HugePageSize  = 0
+       MinFrameSize  = 8
+)
+
+type Uintreg uint64
+type Intptr int64 // TODO(rsc): remove
diff --git a/src/runtime/internal/sys/arch_ppc64.go b/src/runtime/internal/sys/arch_ppc64.go
new file mode 100644 (file)
index 0000000..9b13415
--- /dev/null
@@ -0,0 +1,19 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sys
+
+const (
+       TheChar       = '9'
+       BigEndian     = 1
+       CacheLineSize = 64
+       PhysPageSize  = 65536
+       PCQuantum     = 4
+       Int64Align    = 8
+       HugePageSize  = 0
+       MinFrameSize  = 8
+)
+
+type Uintreg uint64
+type Intptr int64 // TODO(rsc): remove
diff --git a/src/runtime/internal/sys/arch_ppc64le.go b/src/runtime/internal/sys/arch_ppc64le.go
new file mode 100644 (file)
index 0000000..db9b2aa
--- /dev/null
@@ -0,0 +1,19 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sys
+
+const (
+       TheChar       = '9'
+       BigEndian     = 0
+       CacheLineSize = 64
+       PhysPageSize  = 65536
+       PCQuantum     = 4
+       Int64Align    = 8
+       HugePageSize  = 0
+       MinFrameSize  = 8
+)
+
+type Uintreg uint64
+type Intptr int64 // TODO(rsc): remove
similarity index 81%
rename from src/runtime/gengoos.go
rename to src/runtime/internal/sys/gengoos.go
index 06621c8dba2e320ec096cb808de38c7b2c9cef4e..55d991cac37bc9e26a3ac89e18d38a3d0bb16487 100644 (file)
@@ -18,7 +18,7 @@ import (
 var gooses, goarches []string
 
 func main() {
-       data, err := ioutil.ReadFile("../go/build/syslist.go")
+       data, err := ioutil.ReadFile("../../../go/build/syslist.go")
        if err != nil {
                log.Fatal(err)
        }
@@ -49,14 +49,14 @@ func main() {
                if target == "linux" {
                        fmt.Fprintf(&buf, "// +build !android\n\n") // must explicitly exclude android for linux
                }
-               fmt.Fprintf(&buf, "package runtime\n\n")
-               fmt.Fprintf(&buf, "const theGoos = `%s`\n\n", target)
+               fmt.Fprintf(&buf, "package sys\n\n")
+               fmt.Fprintf(&buf, "const TheGoos = `%s`\n\n", target)
                for _, goos := range gooses {
                        value := 0
                        if goos == target {
                                value = 1
                        }
-                       fmt.Fprintf(&buf, "const goos_%s = %d\n", goos, value)
+                       fmt.Fprintf(&buf, "const Goos%s = %d\n", strings.Title(goos), value)
                }
                err := ioutil.WriteFile("zgoos_"+target+".go", buf.Bytes(), 0666)
                if err != nil {
@@ -67,14 +67,14 @@ func main() {
        for _, target := range goarches {
                var buf bytes.Buffer
                fmt.Fprintf(&buf, "// generated by gengoos.go using 'go generate'\n\n")
-               fmt.Fprintf(&buf, "package runtime\n\n")
-               fmt.Fprintf(&buf, "const theGoarch = `%s`\n\n", target)
+               fmt.Fprintf(&buf, "package sys\n\n")
+               fmt.Fprintf(&buf, "const TheGoarch = `%s`\n\n", target)
                for _, goarch := range goarches {
                        value := 0
                        if goarch == target {
                                value = 1
                        }
-                       fmt.Fprintf(&buf, "const goarch_%s = %d\n", goarch, value)
+                       fmt.Fprintf(&buf, "const Goarch%s = %d\n", strings.Title(goarch), value)
                }
                err := ioutil.WriteFile("zgoarch_"+target+".go", buf.Bytes(), 0666)
                if err != nil {
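
The rename above also changes the generated identifiers from goos_linux / goarch_arm64 style to exported GoosLinux / GoarchArm64 style, using strings.Title on each GOOS/GOARCH name (strings.Title is what the generator uses at this point in history). A small sketch of just that name mapping:

package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, goos := range []string{"linux", "darwin", "nacl", "plan9"} {
		fmt.Printf("const Goos%s = 0\n", strings.Title(goos))
	}
	// const GoosLinux = 0
	// const GoosDarwin = 0
	// const GoosNacl = 0
	// const GoosPlan9 = 0
}
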
diff --git a/src/runtime/internal/sys/stubs.go b/src/runtime/internal/sys/stubs.go
new file mode 100644 (file)
index 0000000..0a94502
--- /dev/null
@@ -0,0 +1,11 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sys
+
+// Declarations for runtime services implemented in C or assembly.
+
+const PtrSize = 4 << (^uintptr(0) >> 63)           // unsafe.Sizeof(uintptr(0)) but an ideal const
+const RegSize = 4 << (^Uintreg(0) >> 63)           // unsafe.Sizeof(uintreg(0)) but an ideal const
+const SpAlign = 1*(1-GoarchArm64) + 16*GoarchArm64 // SP alignment: 1 normally, 16 for ARM64
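
PtrSize above is an "ideal" (untyped) constant: ^uintptr(0) is all ones, shifting it right by 63 leaves 1 on 64-bit targets and 0 on 32-bit ones, so the expression evaluates to 8 or 4 without referring to unsafe.Sizeof, and it can be used anywhere a constant is required. A stand-alone check:

package main

import (
	"fmt"
	"unsafe"
)

const PtrSize = 4 << (^uintptr(0) >> 63) // 8 on 64-bit, 4 on 32-bit

func main() {
	fmt.Println(PtrSize, unsafe.Sizeof(uintptr(0))) // the two always agree
	var frame [2 * PtrSize]byte                     // legal array length: PtrSize is a constant
	fmt.Println(len(frame))
}
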
diff --git a/src/runtime/internal/sys/sys.go b/src/runtime/internal/sys/sys.go
new file mode 100644 (file)
index 0000000..15ad7f5
--- /dev/null
@@ -0,0 +1,15 @@
+// Copyright 2015 The Go Authors.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// package sys contains system- and configuration- and architecture-specific
+// constants used by the runtime.
+package sys
+
+// The next line makes 'go generate' write the zgoos_*.go and zgoarch_*.go
+// files with per-OS and per-arch information, including constants
+// named Goos$GOOS and Goarch$GOARCH for every
+// known GOOS and GOARCH. The constant is 1 on the
+// current system, 0 otherwise; multiplying by them is
+// useful for defining GOOS- or GOARCH-specific constants.
+//go:generate go run gengoos.go
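
A sketch of the idiom the comment above describes: exactly one of the generated Goos* (or Goarch*) constants is 1, so a per-OS value can be written as a sum of products and still be a plain compile-time constant. The constant name and values below are hypothetical, not part of the runtime; the Goos* values are hard-coded so the sketch builds on its own instead of coming from a generated zgoos_*.go file:

package main

import "fmt"

const (
	GoosPlan9   = 0
	GoosWindows = 0
)

// hypotheticalSpinCount is not a real runtime constant; it only shows the
// sum-of-products style: 0 on Plan 9, 10 on Windows, 20 everywhere else.
const hypotheticalSpinCount = 0*GoosPlan9 + 10*GoosWindows + 20*(1-GoosPlan9-GoosWindows)

func main() {
	fmt.Println(hypotheticalSpinCount) // 20 with the values above
}
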
diff --git a/src/runtime/internal/sys/zgoarch_386.go b/src/runtime/internal/sys/zgoarch_386.go
new file mode 100644 (file)
index 0000000..3ad2445
--- /dev/null
@@ -0,0 +1,26 @@
+// generated by gengoos.go using 'go generate'
+
+package sys
+
+const TheGoarch = `386`
+
+const Goarch386 = 1
+const GoarchAmd64 = 0
+const GoarchAmd64p32 = 0
+const GoarchArm = 0
+const GoarchArmbe = 0
+const GoarchArm64 = 0
+const GoarchArm64be = 0
+const GoarchPpc64 = 0
+const GoarchPpc64le = 0
+const GoarchMips = 0
+const GoarchMipsle = 0
+const GoarchMips64 = 0
+const GoarchMips64le = 0
+const GoarchMips64p32 = 0
+const GoarchMips64p32le = 0
+const GoarchPpc = 0
+const GoarchS390 = 0
+const GoarchS390x = 0
+const GoarchSparc = 0
+const GoarchSparc64 = 0
diff --git a/src/runtime/internal/sys/zgoarch_amd64.go b/src/runtime/internal/sys/zgoarch_amd64.go
new file mode 100644 (file)
index 0000000..7c858e3
--- /dev/null
@@ -0,0 +1,26 @@
+// generated by gengoos.go using 'go generate'
+
+package sys
+
+const TheGoarch = `amd64`
+
+const Goarch386 = 0
+const GoarchAmd64 = 1
+const GoarchAmd64p32 = 0
+const GoarchArm = 0
+const GoarchArmbe = 0
+const GoarchArm64 = 0
+const GoarchArm64be = 0
+const GoarchPpc64 = 0
+const GoarchPpc64le = 0
+const GoarchMips = 0
+const GoarchMipsle = 0
+const GoarchMips64 = 0
+const GoarchMips64le = 0
+const GoarchMips64p32 = 0
+const GoarchMips64p32le = 0
+const GoarchPpc = 0
+const GoarchS390 = 0
+const GoarchS390x = 0
+const GoarchSparc = 0
+const GoarchSparc64 = 0
diff --git a/src/runtime/internal/sys/zgoarch_amd64p32.go b/src/runtime/internal/sys/zgoarch_amd64p32.go
new file mode 100644 (file)
index 0000000..772031c
--- /dev/null
@@ -0,0 +1,26 @@
+// generated by gengoos.go using 'go generate'
+
+package sys
+
+const TheGoarch = `amd64p32`
+
+const Goarch386 = 0
+const GoarchAmd64 = 0
+const GoarchAmd64p32 = 1
+const GoarchArm = 0
+const GoarchArmbe = 0
+const GoarchArm64 = 0
+const GoarchArm64be = 0
+const GoarchPpc64 = 0
+const GoarchPpc64le = 0
+const GoarchMips = 0
+const GoarchMipsle = 0
+const GoarchMips64 = 0
+const GoarchMips64le = 0
+const GoarchMips64p32 = 0
+const GoarchMips64p32le = 0
+const GoarchPpc = 0
+const GoarchS390 = 0
+const GoarchS390x = 0
+const GoarchSparc = 0
+const GoarchSparc64 = 0
diff --git a/src/runtime/internal/sys/zgoarch_arm.go b/src/runtime/internal/sys/zgoarch_arm.go
new file mode 100644 (file)
index 0000000..276e8a8
--- /dev/null
@@ -0,0 +1,26 @@
+// generated by gengoos.go using 'go generate'
+
+package sys
+
+const TheGoarch = `arm`
+
+const Goarch386 = 0
+const GoarchAmd64 = 0
+const GoarchAmd64p32 = 0
+const GoarchArm = 1
+const GoarchArmbe = 0
+const GoarchArm64 = 0
+const GoarchArm64be = 0
+const GoarchPpc64 = 0
+const GoarchPpc64le = 0
+const GoarchMips = 0
+const GoarchMipsle = 0
+const GoarchMips64 = 0
+const GoarchMips64le = 0
+const GoarchMips64p32 = 0
+const GoarchMips64p32le = 0
+const GoarchPpc = 0
+const GoarchS390 = 0
+const GoarchS390x = 0
+const GoarchSparc = 0
+const GoarchSparc64 = 0
diff --git a/src/runtime/internal/sys/zgoarch_arm64.go b/src/runtime/internal/sys/zgoarch_arm64.go
new file mode 100644 (file)
index 0000000..d124ec0
--- /dev/null
@@ -0,0 +1,26 @@
+// generated by gengoos.go using 'go generate'
+
+package sys
+
+const TheGoarch = `arm64`
+
+const Goarch386 = 0
+const GoarchAmd64 = 0
+const GoarchAmd64p32 = 0
+const GoarchArm = 0
+const GoarchArmbe = 0
+const GoarchArm64 = 1
+const GoarchArm64be = 0
+const GoarchPpc64 = 0
+const GoarchPpc64le = 0
+const GoarchMips = 0
+const GoarchMipsle = 0
+const GoarchMips64 = 0
+const GoarchMips64le = 0
+const GoarchMips64p32 = 0
+const GoarchMips64p32le = 0
+const GoarchPpc = 0
+const GoarchS390 = 0
+const GoarchS390x = 0
+const GoarchSparc = 0
+const GoarchSparc64 = 0
diff --git a/src/runtime/internal/sys/zgoarch_ppc64.go b/src/runtime/internal/sys/zgoarch_ppc64.go
new file mode 100644 (file)
index 0000000..06f78b2
--- /dev/null
@@ -0,0 +1,26 @@
+// generated by gengoos.go using 'go generate'
+
+package sys
+
+const TheGoarch = `ppc64`
+
+const Goarch386 = 0
+const GoarchAmd64 = 0
+const GoarchAmd64p32 = 0
+const GoarchArm = 0
+const GoarchArmbe = 0
+const GoarchArm64 = 0
+const GoarchArm64be = 0
+const GoarchPpc64 = 1
+const GoarchPpc64le = 0
+const GoarchMips = 0
+const GoarchMipsle = 0
+const GoarchMips64 = 0
+const GoarchMips64le = 0
+const GoarchMips64p32 = 0
+const GoarchMips64p32le = 0
+const GoarchPpc = 0
+const GoarchS390 = 0
+const GoarchS390x = 0
+const GoarchSparc = 0
+const GoarchSparc64 = 0
diff --git a/src/runtime/internal/sys/zgoarch_ppc64le.go b/src/runtime/internal/sys/zgoarch_ppc64le.go
new file mode 100644 (file)
index 0000000..50b56db
--- /dev/null
@@ -0,0 +1,26 @@
+// generated by gengoos.go using 'go generate'
+
+package sys
+
+const TheGoarch = `ppc64le`
+
+const Goarch386 = 0
+const GoarchAmd64 = 0
+const GoarchAmd64p32 = 0
+const GoarchArm = 0
+const GoarchArmbe = 0
+const GoarchArm64 = 0
+const GoarchArm64be = 0
+const GoarchPpc64 = 0
+const GoarchPpc64le = 1
+const GoarchMips = 0
+const GoarchMipsle = 0
+const GoarchMips64 = 0
+const GoarchMips64le = 0
+const GoarchMips64p32 = 0
+const GoarchMips64p32le = 0
+const GoarchPpc = 0
+const GoarchS390 = 0
+const GoarchS390x = 0
+const GoarchSparc = 0
+const GoarchSparc64 = 0
diff --git a/src/runtime/internal/sys/zgoos_android.go b/src/runtime/internal/sys/zgoos_android.go
new file mode 100644 (file)
index 0000000..03d9176
--- /dev/null
@@ -0,0 +1,17 @@
+// generated by gengoos.go using 'go generate'
+
+package sys
+
+const TheGoos = `android`
+
+const GoosAndroid = 1
+const GoosDarwin = 0
+const GoosDragonfly = 0
+const GoosFreebsd = 0
+const GoosLinux = 0
+const GoosNacl = 0
+const GoosNetbsd = 0
+const GoosOpenbsd = 0
+const GoosPlan9 = 0
+const GoosSolaris = 0
+const GoosWindows = 0
diff --git a/src/runtime/internal/sys/zgoos_darwin.go b/src/runtime/internal/sys/zgoos_darwin.go
new file mode 100644 (file)
index 0000000..eb2efeb
--- /dev/null
@@ -0,0 +1,17 @@
+// generated by gengoos.go using 'go generate'
+
+package sys
+
+const TheGoos = `darwin`
+
+const GoosAndroid = 0
+const GoosDarwin = 1
+const GoosDragonfly = 0
+const GoosFreebsd = 0
+const GoosLinux = 0
+const GoosNacl = 0
+const GoosNetbsd = 0
+const GoosOpenbsd = 0
+const GoosPlan9 = 0
+const GoosSolaris = 0
+const GoosWindows = 0
diff --git a/src/runtime/internal/sys/zgoos_dragonfly.go b/src/runtime/internal/sys/zgoos_dragonfly.go
new file mode 100644 (file)
index 0000000..403cf65
--- /dev/null
@@ -0,0 +1,17 @@
+// generated by gengoos.go using 'go generate'
+
+package sys
+
+const TheGoos = `dragonfly`
+
+const GoosAndroid = 0
+const GoosDarwin = 0
+const GoosDragonfly = 1
+const GoosFreebsd = 0
+const GoosLinux = 0
+const GoosNacl = 0
+const GoosNetbsd = 0
+const GoosOpenbsd = 0
+const GoosPlan9 = 0
+const GoosSolaris = 0
+const GoosWindows = 0
diff --git a/src/runtime/internal/sys/zgoos_freebsd.go b/src/runtime/internal/sys/zgoos_freebsd.go
new file mode 100644 (file)
index 0000000..632d5db
--- /dev/null
@@ -0,0 +1,17 @@
+// generated by gengoos.go using 'go generate'
+
+package sys
+
+const TheGoos = `freebsd`
+
+const GoosAndroid = 0
+const GoosDarwin = 0
+const GoosDragonfly = 0
+const GoosFreebsd = 1
+const GoosLinux = 0
+const GoosNacl = 0
+const GoosNetbsd = 0
+const GoosOpenbsd = 0
+const GoosPlan9 = 0
+const GoosSolaris = 0
+const GoosWindows = 0
diff --git a/src/runtime/internal/sys/zgoos_linux.go b/src/runtime/internal/sys/zgoos_linux.go
new file mode 100644 (file)
index 0000000..2d43869
--- /dev/null
@@ -0,0 +1,19 @@
+// generated by gengoos.go using 'go generate'
+
+// +build !android
+
+package sys
+
+const TheGoos = `linux`
+
+const GoosAndroid = 0
+const GoosDarwin = 0
+const GoosDragonfly = 0
+const GoosFreebsd = 0
+const GoosLinux = 1
+const GoosNacl = 0
+const GoosNetbsd = 0
+const GoosOpenbsd = 0
+const GoosPlan9 = 0
+const GoosSolaris = 0
+const GoosWindows = 0
diff --git a/src/runtime/internal/sys/zgoos_nacl.go b/src/runtime/internal/sys/zgoos_nacl.go
new file mode 100644 (file)
index 0000000..a56b6ef
--- /dev/null
@@ -0,0 +1,17 @@
+// generated by gengoos.go using 'go generate'
+
+package sys
+
+const TheGoos = `nacl`
+
+const GoosAndroid = 0
+const GoosDarwin = 0
+const GoosDragonfly = 0
+const GoosFreebsd = 0
+const GoosLinux = 0
+const GoosNacl = 1
+const GoosNetbsd = 0
+const GoosOpenbsd = 0
+const GoosPlan9 = 0
+const GoosSolaris = 0
+const GoosWindows = 0
diff --git a/src/runtime/internal/sys/zgoos_netbsd.go b/src/runtime/internal/sys/zgoos_netbsd.go
new file mode 100644 (file)
index 0000000..46fd0a7
--- /dev/null
@@ -0,0 +1,17 @@
+// generated by gengoos.go using 'go generate'
+
+package sys
+
+const TheGoos = `netbsd`
+
+const GoosAndroid = 0
+const GoosDarwin = 0
+const GoosDragonfly = 0
+const GoosFreebsd = 0
+const GoosLinux = 0
+const GoosNacl = 0
+const GoosNetbsd = 1
+const GoosOpenbsd = 0
+const GoosPlan9 = 0
+const GoosSolaris = 0
+const GoosWindows = 0
diff --git a/src/runtime/internal/sys/zgoos_openbsd.go b/src/runtime/internal/sys/zgoos_openbsd.go
new file mode 100644 (file)
index 0000000..7ee650a
--- /dev/null
@@ -0,0 +1,17 @@
+// generated by gengoos.go using 'go generate'
+
+package sys
+
+const TheGoos = `openbsd`
+
+const GoosAndroid = 0
+const GoosDarwin = 0
+const GoosDragonfly = 0
+const GoosFreebsd = 0
+const GoosLinux = 0
+const GoosNacl = 0
+const GoosNetbsd = 0
+const GoosOpenbsd = 1
+const GoosPlan9 = 0
+const GoosSolaris = 0
+const GoosWindows = 0
diff --git a/src/runtime/internal/sys/zgoos_plan9.go b/src/runtime/internal/sys/zgoos_plan9.go
new file mode 100644 (file)
index 0000000..162e7f6
--- /dev/null
@@ -0,0 +1,17 @@
+// generated by gengoos.go using 'go generate'
+
+package sys
+
+const TheGoos = `plan9`
+
+const GoosAndroid = 0
+const GoosDarwin = 0
+const GoosDragonfly = 0
+const GoosFreebsd = 0
+const GoosLinux = 0
+const GoosNacl = 0
+const GoosNetbsd = 0
+const GoosOpenbsd = 0
+const GoosPlan9 = 1
+const GoosSolaris = 0
+const GoosWindows = 0
diff --git a/src/runtime/internal/sys/zgoos_solaris.go b/src/runtime/internal/sys/zgoos_solaris.go
new file mode 100644 (file)
index 0000000..b2a8f98
--- /dev/null
@@ -0,0 +1,17 @@
+// generated by gengoos.go using 'go generate'
+
+package sys
+
+const TheGoos = `solaris`
+
+const GoosAndroid = 0
+const GoosDarwin = 0
+const GoosDragonfly = 0
+const GoosFreebsd = 0
+const GoosLinux = 0
+const GoosNacl = 0
+const GoosNetbsd = 0
+const GoosOpenbsd = 0
+const GoosPlan9 = 0
+const GoosSolaris = 1
+const GoosWindows = 0
diff --git a/src/runtime/internal/sys/zgoos_windows.go b/src/runtime/internal/sys/zgoos_windows.go
new file mode 100644 (file)
index 0000000..817ec79
--- /dev/null
@@ -0,0 +1,17 @@
+// generated by gengoos.go using 'go generate'
+
+package sys
+
+const TheGoos = `windows`
+
+const GoosAndroid = 0
+const GoosDarwin = 0
+const GoosDragonfly = 0
+const GoosFreebsd = 0
+const GoosLinux = 0
+const GoosNacl = 0
+const GoosNetbsd = 0
+const GoosOpenbsd = 0
+const GoosPlan9 = 0
+const GoosSolaris = 0
+const GoosWindows = 1
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index efaa46f3528626cfcabc2574a4259978a0d05154..6430511d7db86d310a2666b713590f912bb4a232 100644 (file)
 
 package runtime
 
-import "unsafe"
+import (
+       "runtime/internal/sys"
+       "unsafe"
+)
 
 const (
        debugMalloc = false
@@ -143,7 +146,7 @@ const (
        //   windows/32       | 4KB        | 3
        //   windows/64       | 8KB        | 2
        //   plan9            | 4KB        | 3
-       _NumStackOrders = 4 - ptrSize/4*goos_windows - 1*goos_plan9
+       _NumStackOrders = 4 - sys.PtrSize/4*sys.GoosWindows - 1*sys.GoosPlan9
 
        // Number of bits in page to span calculations (4k pages).
        // On Windows 64-bit we limit the arena to 32GB or 35 bits.
@@ -155,7 +158,7 @@ const (
        // On Darwin/arm64, we cannot reserve more than ~5GB of virtual memory,
        // but as most devices have less than 4GB of physical memory anyway, we
        // try to be conservative here, and only ask for a 2GB heap.
-       _MHeapMap_TotalBits = (_64bit*goos_windows)*35 + (_64bit*(1-goos_windows)*(1-goos_darwin*goarch_arm64))*39 + goos_darwin*goarch_arm64*31 + (1-_64bit)*32
+       _MHeapMap_TotalBits = (_64bit*sys.GoosWindows)*35 + (_64bit*(1-sys.GoosWindows)*(1-sys.GoosDarwin*sys.GoarchArm64))*39 + sys.GoosDarwin*sys.GoarchArm64*31 + (1-_64bit)*32
        _MHeapMap_Bits      = _MHeapMap_TotalBits - _PageShift
 
        _MaxMem = uintptr(1<<_MHeapMap_TotalBits - 1)
@@ -228,7 +231,7 @@ func mallocinit() {
        // Set up the allocation arena, a contiguous area of memory where
        // allocated data will be found.  The arena begins with a bitmap large
        // enough to hold 4 bits per allocated word.
-       if ptrSize == 8 && (limit == 0 || limit > 1<<30) {
+       if sys.PtrSize == 8 && (limit == 0 || limit > 1<<30) {
                // On a 64-bit machine, allocate from a single contiguous reservation.
                // 512 GB (MaxMem) should be big enough for now.
                //
@@ -259,8 +262,8 @@ func mallocinit() {
                // translation buffers, the user address space is limited to 39 bits
                // On darwin/arm64, the address space is even smaller.
                arenaSize := round(_MaxMem, _PageSize)
-               bitmapSize = arenaSize / (ptrSize * 8 / 4)
-               spansSize = arenaSize / _PageSize * ptrSize
+               bitmapSize = arenaSize / (sys.PtrSize * 8 / 4)
+               spansSize = arenaSize / _PageSize * sys.PtrSize
                spansSize = round(spansSize, _PageSize)
                for i := 0; i <= 0x7f; i++ {
                        switch {
@@ -308,12 +311,12 @@ func mallocinit() {
                }
 
                for _, arenaSize := range arenaSizes {
-                       bitmapSize = _MaxArena32 / (ptrSize * 8 / 4)
-                       spansSize = _MaxArena32 / _PageSize * ptrSize
+                       bitmapSize = _MaxArena32 / (sys.PtrSize * 8 / 4)
+                       spansSize = _MaxArena32 / _PageSize * sys.PtrSize
                        if limit > 0 && arenaSize+bitmapSize+spansSize > limit {
                                bitmapSize = (limit / 9) &^ ((1 << _PageShift) - 1)
                                arenaSize = bitmapSize * 8
-                               spansSize = arenaSize / _PageSize * ptrSize
+                               spansSize = arenaSize / _PageSize * sys.PtrSize
                        }
                        spansSize = round(spansSize, _PageSize)
 
@@ -368,7 +371,7 @@ func mallocinit() {
 // needed. This doesn't work well with the "let the kernel pick an address"
 // mode, so don't do that. Pick a high address instead.
 func sysReserveHigh(n uintptr, reserved *bool) unsafe.Pointer {
-       if ptrSize == 4 {
+       if sys.PtrSize == 4 {
                return sysReserve(nil, n, reserved)
        }
 
@@ -642,7 +645,7 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
                        x = unsafe.Pointer(v)
                        if flags&flagNoZero == 0 {
                                v.ptr().next = 0
-                               if size > 2*ptrSize && ((*[2]uintptr)(x))[1] != 0 {
+                               if size > 2*sys.PtrSize && ((*[2]uintptr)(x))[1] != 0 {
                                        memclr(unsafe.Pointer(v), size)
                                }
                        }
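
The _NumStackOrders constant above encodes the table in its comment as arithmetic over the 0/1 sys constants. A quick check of the formula for each configuration (the values are passed in here so all rows can be printed; in the runtime they are compile-time constants):

package main

import "fmt"

func numStackOrders(ptrSize, goosWindows, goosPlan9 int) int {
	return 4 - ptrSize/4*goosWindows - 1*goosPlan9
}

func main() {
	fmt.Println(numStackOrders(8, 0, 0)) // linux/darwin/... 64-bit: 4
	fmt.Println(numStackOrders(4, 1, 0)) // windows/32:            3
	fmt.Println(numStackOrders(8, 1, 0)) // windows/64:            2
	fmt.Println(numStackOrders(4, 0, 1)) // plan9:                 3
}

The _MHeapMap_TotalBits expression a few lines further down uses the same 0/1 multiplication to pick the arena size per platform.
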
diff --git a/src/runtime/mbarrier.go b/src/runtime/mbarrier.go
index f6e6c30648876afcb812eb960a0d812051948a12..f9553b9e14fd85266781760fee00d6dda5997495 100644 (file)
 
 package runtime
 
-import "unsafe"
+import (
+       "runtime/internal/sys"
+       "unsafe"
+)
 
 // markwb is the mark-phase write barrier, the only barrier we have.
 // The rest of this file exists only to make calls to this function.
@@ -128,7 +131,7 @@ func writebarrierptr(dst *uintptr, src uintptr) {
        if !writeBarrierEnabled {
                return
        }
-       if src != 0 && (src < _PhysPageSize || src == poisonStack) {
+       if src != 0 && (src < sys.PhysPageSize || src == poisonStack) {
                systemstack(func() {
                        print("runtime: writebarrierptr *", dst, " = ", hex(src), "\n")
                        throw("bad pointer in write barrier")
@@ -144,7 +147,7 @@ func writebarrierptr_nostore(dst *uintptr, src uintptr) {
        if !writeBarrierEnabled {
                return
        }
-       if src != 0 && (src < _PhysPageSize || src == poisonStack) {
+       if src != 0 && (src < sys.PhysPageSize || src == poisonStack) {
                systemstack(func() { throw("bad pointer in write barrier") })
        }
        writebarrierptr_nostore1(dst, src)
@@ -195,15 +198,15 @@ func reflect_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
 //go:linkname reflect_typedmemmovepartial reflect.typedmemmovepartial
 func reflect_typedmemmovepartial(typ *_type, dst, src unsafe.Pointer, off, size uintptr) {
        memmove(dst, src, size)
-       if !writeBarrierEnabled || typ.kind&kindNoPointers != 0 || size < ptrSize || !inheap(uintptr(dst)) {
+       if !writeBarrierEnabled || typ.kind&kindNoPointers != 0 || size < sys.PtrSize || !inheap(uintptr(dst)) {
                return
        }
 
-       if frag := -off & (ptrSize - 1); frag != 0 {
+       if frag := -off & (sys.PtrSize - 1); frag != 0 {
                dst = add(dst, frag)
                size -= frag
        }
-       heapBitsBulkBarrier(uintptr(dst), size&^(ptrSize-1))
+       heapBitsBulkBarrier(uintptr(dst), size&^(sys.PtrSize-1))
 }
 
 // callwritebarrier is invoked at the end of reflectcall, to execute
@@ -215,7 +218,7 @@ func reflect_typedmemmovepartial(typ *_type, dst, src unsafe.Pointer, off, size
 // not to be preempted before the write barriers have been run.
 //go:nosplit
 func callwritebarrier(typ *_type, frame unsafe.Pointer, framesize, retoffset uintptr) {
-       if !writeBarrierEnabled || typ == nil || typ.kind&kindNoPointers != 0 || framesize-retoffset < ptrSize || !inheap(uintptr(frame)) {
+       if !writeBarrierEnabled || typ == nil || typ.kind&kindNoPointers != 0 || framesize-retoffset < sys.PtrSize || !inheap(uintptr(frame)) {
                return
        }
        heapBitsBulkBarrier(uintptr(add(frame, retoffset)), framesize-retoffset)
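
The reflect_typedmemmovepartial hunk above rounds the destination up to a pointer boundary with frag := -off & (PtrSize-1): the low bits of -off are exactly the distance to the next multiple of PtrSize, and 0 when off is already aligned (the companion size &^ (PtrSize-1) rounds the length down). A stand-alone check of the idiom:

package main

import "fmt"

const PtrSize = 4 << (^uintptr(0) >> 63)

func main() {
	for _, off := range []uintptr{0, 1, 4, 7, 8, 13} {
		frag := -off & (PtrSize - 1) // bytes needed to reach the next aligned offset
		fmt.Printf("off=%2d frag=%d next aligned=%d\n", off, frag, off+frag)
	}
}
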
diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go
index ba123eafea4787bff8bed181ca1a57d73e8090e6..335d1d8251fce21da6168d675b05cfda63917ae8 100644 (file)
@@ -68,6 +68,7 @@ package runtime
 
 import (
        "runtime/internal/atomic"
+       "runtime/internal/sys"
        "unsafe"
 )
 
@@ -75,8 +76,8 @@ const (
        bitPointer = 1 << 0
        bitMarked  = 1 << 4
 
-       heapBitsShift   = 1                 // shift offset between successive bitPointer or bitMarked entries
-       heapBitmapScale = ptrSize * (8 / 2) // number of data bytes described by one heap bitmap byte
+       heapBitsShift   = 1                     // shift offset between successive bitPointer or bitMarked entries
+       heapBitmapScale = sys.PtrSize * (8 / 2) // number of data bytes described by one heap bitmap byte
 
        // all mark/pointer bits in a byte
        bitMarkedAll  = bitMarked | bitMarked<<heapBitsShift | bitMarked<<(2*heapBitsShift) | bitMarked<<(3*heapBitsShift)
@@ -139,7 +140,7 @@ func (h *mheap) mapBits(arena_used uintptr) {
 
        n := (arena_used - mheap_.arena_start) / heapBitmapScale
        n = round(n, bitmapChunk)
-       n = round(n, _PhysPageSize)
+       n = round(n, sys.PhysPageSize)
        if h.bitmap_mapped >= n {
                return
        }
@@ -164,7 +165,7 @@ type heapBits struct {
 //go:nosplit
 func heapBitsForAddr(addr uintptr) heapBits {
        // 2 bits per word, 4 pairs per byte, and a mask is hard coded.
-       off := (addr - mheap_.arena_start) / ptrSize
+       off := (addr - mheap_.arena_start) / sys.PtrSize
        return heapBits{(*uint8)(unsafe.Pointer(mheap_.arena_start - off/4 - 1)), uint32(off & 3)}
 }
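
heapBitsForAddr above locates the two bitmap bits for a heap word: the bitmap sits just below arena_start and grows downward, one byte per four words, so word number off lives in the byte at arena_start - off/4 - 1, at pair index off&3. A sketch of just the index arithmetic (the addresses are made-up numbers, not real pointers):

package main

import "fmt"

const PtrSize = 4 << (^uintptr(0) >> 63)

func heapBitsIndex(arenaStart, addr uintptr) (byteAddr uintptr, pair uint32) {
	off := (addr - arenaStart) / PtrSize // word index into the arena
	return arenaStart - off/4 - 1, uint32(off & 3)
}

func main() {
	const arenaStart = 0x10000
	for i := uintptr(0); i < 6; i++ {
		b, p := heapBitsIndex(arenaStart, arenaStart+i*PtrSize)
		fmt.Printf("word %d -> bitmap byte %#x, pair %d\n", i, b, p)
	}
}
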
 
@@ -328,7 +329,7 @@ func (h heapBits) isPointer() bool {
 // far into the bitmap.
 // h must describe the initial word of the object.
 func (h heapBits) hasPointers(size uintptr) bool {
-       if size == ptrSize { // 1-word objects are always pointers
+       if size == sys.PtrSize { // 1-word objects are always pointers
                return true
        }
        // Otherwise, at least a 2-word object, and at least 2-word aligned,
@@ -339,7 +340,7 @@ func (h heapBits) hasPointers(size uintptr) bool {
        if b&(bitPointer|bitPointer<<heapBitsShift) != 0 {
                return true
        }
-       if size == 2*ptrSize {
+       if size == 2*sys.PtrSize {
                return false
        }
        // At least a 4-word object. Check scan bit (aka marked bit) in third word.
@@ -354,7 +355,7 @@ func (h heapBits) hasPointers(size uintptr) bool {
 // checkmark bit varies by size.
 // h must describe the initial word of the object.
 func (h heapBits) isCheckmarked(size uintptr) bool {
-       if size == ptrSize {
+       if size == sys.PtrSize {
                return (*h.bitp>>h.shift)&bitPointer != 0
        }
        // All multiword objects are 2-word aligned,
@@ -369,7 +370,7 @@ func (h heapBits) isCheckmarked(size uintptr) bool {
 // checkmark bit varies by size.
 // h must describe the initial word of the object.
 func (h heapBits) setCheckmarked(size uintptr) {
-       if size == ptrSize {
+       if size == sys.PtrSize {
                atomic.Or8(h.bitp, bitPointer<<h.shift)
                return
        }
@@ -395,7 +396,7 @@ func (h heapBits) setCheckmarked(size uintptr) {
 //
 //go:nosplit
 func heapBitsBulkBarrier(p, size uintptr) {
-       if (p|size)&(ptrSize-1) != 0 {
+       if (p|size)&(sys.PtrSize-1) != 0 {
                throw("heapBitsBulkBarrier: unaligned arguments")
        }
        if !writeBarrierEnabled {
@@ -430,7 +431,7 @@ func heapBitsBulkBarrier(p, size uintptr) {
        }
 
        h := heapBitsForAddr(p)
-       for i := uintptr(0); i < size; i += ptrSize {
+       for i := uintptr(0); i < size; i += sys.PtrSize {
                if h.isPointer() {
                        x := (*uintptr)(unsafe.Pointer(p + i))
                        writebarrierptr_nostore(x, *x)
@@ -470,8 +471,8 @@ func typeBitsBulkBarrier(typ *_type, p, size uintptr) {
        }
        ptrmask := typ.gcdata
        var bits uint32
-       for i := uintptr(0); i < typ.ptrdata; i += ptrSize {
-               if i&(ptrSize*8-1) == 0 {
+       for i := uintptr(0); i < typ.ptrdata; i += sys.PtrSize {
+               if i&(sys.PtrSize*8-1) == 0 {
                        bits = uint32(*ptrmask)
                        ptrmask = addb(ptrmask, 1)
                } else {
@@ -498,7 +499,7 @@ func (h heapBits) initSpan(size, n, total uintptr) {
                throw("initSpan: unaligned length")
        }
        nbyte := total / heapBitmapScale
-       if ptrSize == 8 && size == ptrSize {
+       if sys.PtrSize == 8 && size == sys.PtrSize {
                end := h.bitp
                bitp := subtractb(end, nbyte-1)
                for {
@@ -517,7 +518,7 @@ func (h heapBits) initSpan(size, n, total uintptr) {
 // It clears the checkmark bits, which are set to 1 in normal operation.
 func (h heapBits) initCheckmarkSpan(size, n, total uintptr) {
        // The ptrSize == 8 is a compile-time constant false on 32-bit and eliminates this code entirely.
-       if ptrSize == 8 && size == ptrSize {
+       if sys.PtrSize == 8 && size == sys.PtrSize {
                // Checkmark bit is type bit, bottom bit of every 2-bit entry.
                // Only possible on 64-bit system, since minimum size is 8.
                // Must clear type bit (checkmark bit) of every word.
@@ -531,7 +532,7 @@ func (h heapBits) initCheckmarkSpan(size, n, total uintptr) {
        }
        for i := uintptr(0); i < n; i++ {
                *h.bitp &^= bitMarked << (heapBitsShift + h.shift)
-               h = h.forward(size / ptrSize)
+               h = h.forward(size / sys.PtrSize)
        }
 }
 
@@ -541,7 +542,7 @@ func (h heapBits) initCheckmarkSpan(size, n, total uintptr) {
 // but consulted by typedmemmove.)
 func (h heapBits) clearCheckmarkSpan(size, n, total uintptr) {
        // The ptrSize == 8 is a compile-time constant false on 32-bit and eliminates this code entirely.
-       if ptrSize == 8 && size == ptrSize {
+       if sys.PtrSize == 8 && size == sys.PtrSize {
                // Checkmark bit is type bit, bottom bit of every 2-bit entry.
                // Only possible on 64-bit system, since minimum size is 8.
                // Must clear type bit (checkmark bit) of every word.
@@ -566,7 +567,7 @@ func heapBitsSweepSpan(base, size, n uintptr, f func(uintptr)) {
        switch {
        default:
                throw("heapBitsSweepSpan")
-       case ptrSize == 8 && size == ptrSize:
+       case sys.PtrSize == 8 && size == sys.PtrSize:
                // Consider mark bits in all four 2-bit entries of each bitmap byte.
                bitp := h.bitp
                for i := uintptr(0); i < n; i += 4 {
@@ -579,28 +580,28 @@ func heapBitsSweepSpan(base, size, n uintptr, f func(uintptr)) {
                        if x&bitMarked != 0 {
                                x &^= bitMarked
                        } else {
-                               f(base + i*ptrSize)
+                               f(base + i*sys.PtrSize)
                        }
                        if x&(bitMarked<<heapBitsShift) != 0 {
                                x &^= bitMarked << heapBitsShift
                        } else {
-                               f(base + (i+1)*ptrSize)
+                               f(base + (i+1)*sys.PtrSize)
                        }
                        if x&(bitMarked<<(2*heapBitsShift)) != 0 {
                                x &^= bitMarked << (2 * heapBitsShift)
                        } else {
-                               f(base + (i+2)*ptrSize)
+                               f(base + (i+2)*sys.PtrSize)
                        }
                        if x&(bitMarked<<(3*heapBitsShift)) != 0 {
                                x &^= bitMarked << (3 * heapBitsShift)
                        } else {
-                               f(base + (i+3)*ptrSize)
+                               f(base + (i+3)*sys.PtrSize)
                        }
                        *bitp = uint8(x)
                        bitp = subtract1(bitp)
                }
 
-       case size%(4*ptrSize) == 0:
+       case size%(4*sys.PtrSize) == 0:
                // Mark bit is in first word of each object.
                // Each object starts at bit 0 of a heap bitmap byte.
                bitp := h.bitp
@@ -617,7 +618,7 @@ func heapBitsSweepSpan(base, size, n uintptr, f func(uintptr)) {
                        bitp = subtractb(bitp, step)
                }
 
-       case size%(4*ptrSize) == 2*ptrSize:
+       case size%(4*sys.PtrSize) == 2*sys.PtrSize:
                // Mark bit is in first word of each object,
                // but every other object starts halfway through a heap bitmap byte.
                // Unroll loop 2x to handle alternating shift count and step size.
@@ -631,7 +632,7 @@ func heapBitsSweepSpan(base, size, n uintptr, f func(uintptr)) {
                        } else {
                                x &^= bitMarked | bitPointer | (bitMarked|bitPointer)<<heapBitsShift
                                f(base + i*size)
-                               if size > 2*ptrSize {
+                               if size > 2*sys.PtrSize {
                                        x = 0
                                }
                        }
@@ -646,7 +647,7 @@ func heapBitsSweepSpan(base, size, n uintptr, f func(uintptr)) {
                        } else {
                                x &^= (bitMarked|bitPointer)<<(2*heapBitsShift) | (bitMarked|bitPointer)<<(3*heapBitsShift)
                                f(base + (i+1)*size)
-                               if size > 2*ptrSize {
+                               if size > 2*sys.PtrSize {
                                        *subtract1(bitp) = 0
                                }
                        }
@@ -686,7 +687,7 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
        // The checks for size == ptrSize and size == 2*ptrSize can therefore
        // assume that dataSize == size without checking it explicitly.
 
-       if ptrSize == 8 && size == ptrSize {
+       if sys.PtrSize == 8 && size == sys.PtrSize {
                // It's one word and it has pointers, it must be a pointer.
                // In general we'd need an atomic update here if the
                // concurrent GC were marking objects in this span,
@@ -712,8 +713,8 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
        // This is called out as a special case primarily for 32-bit systems,
        // so that on 32-bit systems the code below can assume all objects
        // are 4-word aligned (because they're all 16-byte aligned).
-       if size == 2*ptrSize {
-               if typ.size == ptrSize {
+       if size == 2*sys.PtrSize {
+               if typ.size == sys.PtrSize {
                        // We're allocating a block big enough to hold two pointers.
                        // On 64-bit, that means the actual object must be two pointers,
                        // or else we'd have used the one-pointer-sized block.
@@ -722,7 +723,7 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
                        // just the smallest block available. Distinguish by checking dataSize.
                        // (In general the number of instances of typ being allocated is
                        // dataSize/typ.size.)
-                       if ptrSize == 4 && dataSize == ptrSize {
+                       if sys.PtrSize == 4 && dataSize == sys.PtrSize {
                                // 1 pointer.
                                if gcphase == _GCoff {
                                        *h.bitp |= bitPointer << h.shift
@@ -741,7 +742,7 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
                }
                // Otherwise typ.size must be 2*ptrSize, and typ.kind&kindGCProg == 0.
                if doubleCheck {
-                       if typ.size != 2*ptrSize || typ.kind&kindGCProg != 0 {
+                       if typ.size != 2*sys.PtrSize || typ.kind&kindGCProg != 0 {
                                print("runtime: heapBitsSetType size=", size, " but typ.size=", typ.size, " gcprog=", typ.kind&kindGCProg != 0, "\n")
                                throw("heapBitsSetType")
                        }
@@ -842,8 +843,8 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
                // Filling in bits for an array of typ.
                // Set up for repetition of ptrmask during main loop.
                // Note that ptrmask describes only a prefix of
-               const maxBits = ptrSize*8 - 7
-               if typ.ptrdata/ptrSize <= maxBits {
+               const maxBits = sys.PtrSize*8 - 7
+               if typ.ptrdata/sys.PtrSize <= maxBits {
                        // Entire ptrmask fits in uintptr with room for a byte fragment.
                        // Load into pbits and never read from ptrmask again.
                        // This is especially important when the ptrmask has
@@ -854,12 +855,12 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
                        // Accumulate ptrmask into b.
                        // ptrmask is sized to describe only typ.ptrdata, but we record
                        // it as describing typ.size bytes, since all the high bits are zero.
-                       nb = typ.ptrdata / ptrSize
+                       nb = typ.ptrdata / sys.PtrSize
                        for i := uintptr(0); i < nb; i += 8 {
                                b |= uintptr(*p) << i
                                p = add1(p)
                        }
-                       nb = typ.size / ptrSize
+                       nb = typ.size / sys.PtrSize
 
                        // Replicate ptrmask to fill entire pbits uintptr.
                        // Doubling and truncating is fewer steps than
@@ -870,7 +871,7 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
                        pbits = b
                        endnb = nb
                        if nb+nb <= maxBits {
-                               for endnb <= ptrSize*8 {
+                               for endnb <= sys.PtrSize*8 {
                                        pbits |= pbits << endnb
                                        endnb += endnb
                                }
@@ -887,9 +888,9 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
                        endp = nil
                } else {
                        // Ptrmask is larger. Read it multiple times.
-                       n := (typ.ptrdata/ptrSize+7)/8 - 1
+                       n := (typ.ptrdata/sys.PtrSize+7)/8 - 1
                        endp = addb(ptrmask, n)
-                       endnb = typ.size/ptrSize - n*8
+                       endnb = typ.size/sys.PtrSize - n*8
                }
        }
        if p != nil {
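
The heapBitsSetType hunks above fill a whole uintptr with repeats of a short pointer-bit pattern by doubling: pbits |= pbits << nb keeps doubling the filled width until the word is full, which takes O(log n) shifts instead of one shift per repetition. A small stand-alone sketch with a made-up 3-bit pattern (the loop bound differs slightly from the runtime's, which stops at PtrSize*8):

package main

import "fmt"

func main() {
	const nb = 3             // pattern width in bits (made up)
	pattern := uint64(0b101) // made-up pointer/scalar bit pattern, LSB first

	pbits, width := pattern, uint(nb)
	for width < 64 {
		pbits |= pbits << width // double the number of valid bits each pass
		width += width
	}
	fmt.Printf("%064b\n", pbits) // the 3-bit pattern repeated across the word
}
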
@@ -900,12 +901,12 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
 
        if typ.size == dataSize {
                // Single entry: can stop once we reach the non-pointer data.
-               nw = typ.ptrdata / ptrSize
+               nw = typ.ptrdata / sys.PtrSize
        } else {
                // Repeated instances of typ in an array.
                // Have to process first N-1 entries in full, but can stop
                // once we reach the non-pointer data in the final entry.
-               nw = ((dataSize/typ.size-1)*typ.size + typ.ptrdata) / ptrSize
+               nw = ((dataSize/typ.size-1)*typ.size + typ.ptrdata) / sys.PtrSize
        }
        if nw == 0 {
                // No pointers! Caller was supposed to check.
@@ -945,7 +946,7 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
                b >>= 4
                nb -= 4
 
-       case ptrSize == 8 && h.shift == 2:
+       case sys.PtrSize == 8 && h.shift == 2:
                // Ptrmask and heap bitmap are misaligned.
                // The bits for the first two words are in a byte shared with another object
                // and must be updated atomically.
@@ -1058,7 +1059,7 @@ Phase3:
        }
 
        // Change nw from counting possibly-pointer words to total words in allocation.
-       nw = size / ptrSize
+       nw = size / sys.PtrSize
 
        // Write whole bitmap bytes.
        // The first is hb, the rest are zero.
@@ -1105,11 +1106,11 @@ Phase4:
                // Double-check that bits to be written were written correctly.
                // Does not check that other bits were not written, unfortunately.
                h := heapBitsForAddr(x)
-               nptr := typ.ptrdata / ptrSize
-               ndata := typ.size / ptrSize
+               nptr := typ.ptrdata / sys.PtrSize
+               ndata := typ.size / sys.PtrSize
                count := dataSize / typ.size
-               totalptr := ((count-1)*typ.size + typ.ptrdata) / ptrSize
-               for i := uintptr(0); i < size/ptrSize; i++ {
+               totalptr := ((count-1)*typ.size + typ.ptrdata) / sys.PtrSize
+               for i := uintptr(0); i < size/sys.PtrSize; i++ {
                        j := i % ndata
                        var have, want uint8
                        have = (*h.bitp >> h.shift) & (bitPointer | bitMarked)
@@ -1137,7 +1138,7 @@ Phase4:
                                print("initial bits h0.bitp=", h0.bitp, " h0.shift=", h0.shift, "\n")
                                print("current bits h.bitp=", h.bitp, " h.shift=", h.shift, " *h.bitp=", hex(*h.bitp), "\n")
                                print("ptrmask=", ptrmask, " p=", p, " endp=", endp, " endnb=", endnb, " pbits=", hex(pbits), " b=", hex(b), " nb=", nb, "\n")
-                               println("at word", i, "offset", i*ptrSize, "have", have, "want", want)
+                               println("at word", i, "offset", i*sys.PtrSize, "have", have, "want", want)
                                if typ.kind&kindGCProg != 0 {
                                        println("GC program:")
                                        dumpGCProg(addb(typ.gcdata, 4))
@@ -1168,14 +1169,14 @@ var debugPtrmask struct {
 // so that the relevant bitmap bytes are not shared with surrounding
 // objects and need not be accessed with atomic instructions.
 func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize uintptr, prog *byte) {
-       if ptrSize == 8 && allocSize%(4*ptrSize) != 0 {
+       if sys.PtrSize == 8 && allocSize%(4*sys.PtrSize) != 0 {
                // Alignment will be wrong.
                throw("heapBitsSetTypeGCProg: small allocation")
        }
        var totalBits uintptr
        if elemSize == dataSize {
                totalBits = runGCProg(prog, nil, h.bitp, 2)
-               if totalBits*ptrSize != progSize {
+               if totalBits*sys.PtrSize != progSize {
                        println("runtime: heapBitsSetTypeGCProg: total bits", totalBits, "but progSize", progSize)
                        throw("heapBitsSetTypeGCProg: unexpected bit count")
                }
@@ -1190,7 +1191,7 @@ func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize u
                // repeats that first element to fill the array.
                var trailer [40]byte // 3 varints (max 10 each) + some bytes
                i := 0
-               if n := elemSize/ptrSize - progSize/ptrSize; n > 0 {
+               if n := elemSize/sys.PtrSize - progSize/sys.PtrSize; n > 0 {
                        // literal(0)
                        trailer[i] = 0x01
                        i++
@@ -1212,7 +1213,7 @@ func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize u
                // repeat(elemSize/ptrSize, count-1)
                trailer[i] = 0x80
                i++
-               n := elemSize / ptrSize
+               n := elemSize / sys.PtrSize
                for ; n >= 0x80; n >>= 7 {
                        trailer[i] = byte(n | 0x80)
                        i++
@@ -1236,7 +1237,7 @@ func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize u
                // last element. This will cause the code below to
                // memclr the dead section of the final array element,
                // so that scanobject can stop early in the final element.
-               totalBits = (elemSize*(count-1) + progSize) / ptrSize
+               totalBits = (elemSize*(count-1) + progSize) / sys.PtrSize
        }
        endProg := unsafe.Pointer(subtractb(h.bitp, (totalBits+3)/4))
        endAlloc := unsafe.Pointer(subtractb(h.bitp, allocSize/heapBitmapScale))
@@ -1247,7 +1248,7 @@ func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize u
 // size the size of the region described by prog, in bytes.
 // The resulting bitvector will have no more than size/ptrSize bits.
 func progToPointerMask(prog *byte, size uintptr) bitvector {
-       n := (size/ptrSize + 7) / 8
+       n := (size/sys.PtrSize + 7) / 8
        x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1]
        x[len(x)-1] = 0xa1 // overflow check sentinel
        n = runGCProg(prog, nil, &x[0], 1)
@@ -1385,7 +1386,7 @@ Run:
                // the pattern to a bit buffer holding at most 7 bits (a partial byte)
                // it will not overflow.
                src := dst
-               const maxBits = ptrSize*8 - 7
+               const maxBits = sys.PtrSize*8 - 7
                if n <= maxBits {
                        // Start with bits in output buffer.
                        pattern := bits
@@ -1438,7 +1439,7 @@ Run:
                                nb := npattern
                                if nb+nb <= maxBits {
                                        // Double pattern until the whole uintptr is filled.
-                                       for nb <= ptrSize*8 {
+                                       for nb <= sys.PtrSize*8 {
                                                b |= b << nb
                                                nb += nb
                                        }
@@ -1627,7 +1628,7 @@ func getgcmaskcb(frame *stkframe, ctxt unsafe.Pointer) bool {
 func reflect_gcbits(x interface{}) []byte {
        ret := getgcmask(x)
        typ := (*ptrtype)(unsafe.Pointer(efaceOf(&x)._type)).elem
-       nptr := typ.ptrdata / ptrSize
+       nptr := typ.ptrdata / sys.PtrSize
        for uintptr(len(ret)) > nptr && ret[len(ret)-1] == 0 {
                ret = ret[:len(ret)-1]
        }
@@ -1645,10 +1646,10 @@ func getgcmask(ep interface{}) (mask []byte) {
                if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
                        bitmap := datap.gcdatamask.bytedata
                        n := (*ptrtype)(unsafe.Pointer(t)).elem.size
-                       mask = make([]byte, n/ptrSize)
-                       for i := uintptr(0); i < n; i += ptrSize {
-                               off := (uintptr(p) + i - datap.data) / ptrSize
-                               mask[i/ptrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
+                       mask = make([]byte, n/sys.PtrSize)
+                       for i := uintptr(0); i < n; i += sys.PtrSize {
+                               off := (uintptr(p) + i - datap.data) / sys.PtrSize
+                               mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
                        }
                        return
                }
@@ -1657,10 +1658,10 @@ func getgcmask(ep interface{}) (mask []byte) {
                if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
                        bitmap := datap.gcbssmask.bytedata
                        n := (*ptrtype)(unsafe.Pointer(t)).elem.size
-                       mask = make([]byte, n/ptrSize)
-                       for i := uintptr(0); i < n; i += ptrSize {
-                               off := (uintptr(p) + i - datap.bss) / ptrSize
-                               mask[i/ptrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
+                       mask = make([]byte, n/sys.PtrSize)
+                       for i := uintptr(0); i < n; i += sys.PtrSize {
+                               off := (uintptr(p) + i - datap.bss) / sys.PtrSize
+                               mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
                        }
                        return
                }
@@ -1670,14 +1671,14 @@ func getgcmask(ep interface{}) (mask []byte) {
        var n uintptr
        var base uintptr
        if mlookup(uintptr(p), &base, &n, nil) != 0 {
-               mask = make([]byte, n/ptrSize)
-               for i := uintptr(0); i < n; i += ptrSize {
+               mask = make([]byte, n/sys.PtrSize)
+               for i := uintptr(0); i < n; i += sys.PtrSize {
                        hbits := heapBitsForAddr(base + i)
                        if hbits.isPointer() {
-                               mask[i/ptrSize] = 1
+                               mask[i/sys.PtrSize] = 1
                        }
-                       if i >= 2*ptrSize && !hbits.isMarked() {
-                               mask = mask[:i/ptrSize]
+                       if i >= 2*sys.PtrSize && !hbits.isMarked() {
+                               mask = mask[:i/sys.PtrSize]
                                break
                        }
                }
@@ -1708,13 +1709,13 @@ func getgcmask(ep interface{}) (mask []byte) {
                                return
                        }
                        bv := stackmapdata(stkmap, pcdata)
-                       size := uintptr(bv.n) * ptrSize
+                       size := uintptr(bv.n) * sys.PtrSize
                        n := (*ptrtype)(unsafe.Pointer(t)).elem.size
-                       mask = make([]byte, n/ptrSize)
-                       for i := uintptr(0); i < n; i += ptrSize {
+                       mask = make([]byte, n/sys.PtrSize)
+                       for i := uintptr(0); i < n; i += sys.PtrSize {
                                bitmap := bv.bytedata
-                               off := (uintptr(p) + i - frame.varp + size) / ptrSize
-                               mask[i/ptrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
+                               off := (uintptr(p) + i - frame.varp + size) / sys.PtrSize
+                               mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
                        }
                }
                return
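
The getgcmask hunks above all walk a byte-granular bitmap with one bit per pointer-sized word: a word's offset is divided by sys.PtrSize to get a bit index, then off/8 selects the byte and off%8 the bit. A minimal standalone sketch of that indexing (ptrSize is hard-coded to 8 and the names are illustrative, not the runtime's):

    package main

    import "fmt"

    const ptrSize = 8 // illustrative: one bit per 8-byte word

    // bitFor returns the pointer bit for the word at byte offset off.
    func bitFor(bitmap []byte, off uintptr) byte {
        word := off / ptrSize
        return (bitmap[word/8] >> (word % 8)) & 1
    }

    func main() {
        // 16 words; mark words 2 and 9 as pointers.
        bitmap := make([]byte, 2)
        bitmap[0] |= 1 << 2
        bitmap[1] |= 1 << 1
        for off := uintptr(0); off < 16*ptrSize; off += ptrSize {
            fmt.Printf("word %2d -> %d\n", off/ptrSize, bitFor(bitmap, off))
        }
    }
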
index ecab584d04958ad69a3b7a2989c04f12fc1ca867..1e388ec7281646e8f63141d3132bd2390554aa23 100644 (file)
@@ -6,7 +6,10 @@
 
 package runtime
 
-import "unsafe"
+import (
+       "runtime/internal/sys"
+       "unsafe"
+)
 
 // Don't split the stack as this function may be invoked without a valid G,
 // which prevents us from allocating more stack.
@@ -43,7 +46,7 @@ func sysReserve(v unsafe.Pointer, n uintptr, reserved *bool) unsafe.Pointer {
        // On 64-bit, people with ulimit -v set complain if we reserve too
        // much address space.  Instead, assume that the reservation is okay
        // and check the assumption in SysMap.
-       if ptrSize == 8 && uint64(n) > 1<<32 || goos_nacl != 0 {
+       if sys.PtrSize == 8 && uint64(n) > 1<<32 || sys.GoosNacl != 0 {
                *reserved = false
                return v
        }
index e8c8999847a284dc0866bb571c3aa14d421aa63a..330504ba9dbaf6f9531d0edbf5fd0f14fa5a9104 100644 (file)
@@ -4,10 +4,13 @@
 
 package runtime
 
-import "unsafe"
+import (
+       "runtime/internal/sys"
+       "unsafe"
+)
 
 const (
-       _PAGE_SIZE = _PhysPageSize
+       _PAGE_SIZE = sys.PhysPageSize
        _EACCES    = 13
 )
 
@@ -94,8 +97,8 @@ func sysUnused(v unsafe.Pointer, n uintptr) {
        // gets most of the benefit of huge pages while keeping the
        // number of VMAs under control. With hugePageSize = 2MB, even
        // a pessimal heap can reach 128GB before running out of VMAs.
-       if hugePageSize != 0 {
-               var s uintptr = hugePageSize // division by constant 0 is a compile-time error :(
+       if sys.HugePageSize != 0 {
+               var s uintptr = sys.HugePageSize // division by constant 0 is a compile-time error :(
 
                // If it's a large allocation, we want to leave huge
                // pages enabled. Hence, we only adjust the huge page
@@ -114,17 +117,17 @@ func sysUnused(v unsafe.Pointer, n uintptr) {
                // Note that madvise will return EINVAL if the flag is
                // already set, which is quite likely. We ignore
                // errors.
-               if head != 0 && head+hugePageSize == tail {
+               if head != 0 && head+sys.HugePageSize == tail {
                        // head and tail are different but adjacent,
                        // so do this in one call.
-                       madvise(unsafe.Pointer(head), 2*hugePageSize, _MADV_NOHUGEPAGE)
+                       madvise(unsafe.Pointer(head), 2*sys.HugePageSize, _MADV_NOHUGEPAGE)
                } else {
                        // Advise the huge pages containing v and v+n-1.
                        if head != 0 {
-                               madvise(unsafe.Pointer(head), hugePageSize, _MADV_NOHUGEPAGE)
+                               madvise(unsafe.Pointer(head), sys.HugePageSize, _MADV_NOHUGEPAGE)
                        }
                        if tail != 0 && tail != head {
-                               madvise(unsafe.Pointer(tail), hugePageSize, _MADV_NOHUGEPAGE)
+                               madvise(unsafe.Pointer(tail), sys.HugePageSize, _MADV_NOHUGEPAGE)
                        }
                }
        }
@@ -133,7 +136,7 @@ func sysUnused(v unsafe.Pointer, n uintptr) {
 }
 
 func sysUsed(v unsafe.Pointer, n uintptr) {
-       if hugePageSize != 0 {
+       if sys.HugePageSize != 0 {
                // Partially undo the NOHUGEPAGE marks from sysUnused
                // for whole huge pages between v and v+n. This may
                // leave huge pages off at the end points v and v+n
@@ -142,7 +145,7 @@ func sysUsed(v unsafe.Pointer, n uintptr) {
                // the end points as well, but it's probably not worth
                // the cost because when neighboring allocations are
                // freed sysUnused will just set NOHUGEPAGE again.
-               var s uintptr = hugePageSize
+               var s uintptr = sys.HugePageSize
 
                // Round v up to a huge page boundary.
                beg := (uintptr(v) + (s - 1)) &^ (s - 1)
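
sysUnused and sysUsed above keep rounding addresses to huge-page boundaries with Go's &^ (AND NOT) operator: (x+s-1)&^(s-1) rounds up and x&^(s-1) rounds down, valid for any power-of-two s. A small sketch; the 2 MB size is only an example, the real value comes from sys.HugePageSize:

    package main

    import "fmt"

    const hugePageSize = 2 << 20 // 2 MB, illustrative only

    // s must be a power of two in both helpers.
    func roundUp(x, s uintptr) uintptr   { return (x + s - 1) &^ (s - 1) }
    func roundDown(x, s uintptr) uintptr { return x &^ (s - 1) }

    func main() {
        v := uintptr(0x1234567)
        fmt.Printf("up:   %#x\n", roundUp(v, hugePageSize))   // 0x1400000
        fmt.Printf("down: %#x\n", roundDown(v, hugePageSize)) // 0x1200000
    }
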
@@ -172,7 +175,7 @@ func sysReserve(v unsafe.Pointer, n uintptr, reserved *bool) unsafe.Pointer {
        // much address space.  Instead, assume that the reservation is okay
        // if we can reserve at least 64K and check the assumption in SysMap.
        // Only user-mode Linux (UML) rejects these requests.
-       if ptrSize == 8 && uint64(n) > 1<<32 {
+       if sys.PtrSize == 8 && uint64(n) > 1<<32 {
                p := mmap_fixed(v, 64<<10, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
                if p != v {
                        if uintptr(p) >= 4096 {
index 8f0f31e7e81d78fd4ff4bf8bc2ce6b218edf7ca3..512edeffe81444ed920439d27f8fecceff2d02a7 100644 (file)
@@ -8,6 +8,7 @@ package runtime
 
 import (
        "runtime/internal/atomic"
+       "runtime/internal/sys"
        "unsafe"
 )
 
@@ -16,14 +17,14 @@ type finblock struct {
        next    *finblock
        cnt     int32
        _       int32
-       fin     [(_FinBlockSize - 2*ptrSize - 2*4) / unsafe.Sizeof(finalizer{})]finalizer
+       fin     [(_FinBlockSize - 2*sys.PtrSize - 2*4) / unsafe.Sizeof(finalizer{})]finalizer
 }
 
 var finlock mutex  // protects the following variables
 var fing *g        // goroutine that runs finalizers
 var finq *finblock // list of finalizers that are to be executed
 var finc *finblock // cache of free blocks
-var finptrmask [_FinBlockSize / ptrSize / 8]byte
+var finptrmask [_FinBlockSize / sys.PtrSize / 8]byte
 var fingwait bool
 var fingwake bool
 var allfin *finblock // list of all blocks
@@ -76,12 +77,12 @@ func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot
                        if finptrmask[0] == 0 {
                                // Build pointer mask for Finalizer array in block.
                                // Check assumptions made in finalizer1 array above.
-                               if (unsafe.Sizeof(finalizer{}) != 5*ptrSize ||
+                               if (unsafe.Sizeof(finalizer{}) != 5*sys.PtrSize ||
                                        unsafe.Offsetof(finalizer{}.fn) != 0 ||
-                                       unsafe.Offsetof(finalizer{}.arg) != ptrSize ||
-                                       unsafe.Offsetof(finalizer{}.nret) != 2*ptrSize ||
-                                       unsafe.Offsetof(finalizer{}.fint) != 3*ptrSize ||
-                                       unsafe.Offsetof(finalizer{}.ot) != 4*ptrSize) {
+                                       unsafe.Offsetof(finalizer{}.arg) != sys.PtrSize ||
+                                       unsafe.Offsetof(finalizer{}.nret) != 2*sys.PtrSize ||
+                                       unsafe.Offsetof(finalizer{}.fint) != 3*sys.PtrSize ||
+                                       unsafe.Offsetof(finalizer{}.ot) != 4*sys.PtrSize) {
                                        throw("finalizer out of sync")
                                }
                                for i := range finptrmask {
@@ -361,7 +362,7 @@ okarg:
        for _, t := range ft.out {
                nret = round(nret, uintptr(t.align)) + uintptr(t.size)
        }
-       nret = round(nret, ptrSize)
+       nret = round(nret, sys.PtrSize)
 
        // make sure we have a finalizer goroutine
        createfing()
@@ -379,7 +380,7 @@ okarg:
 func findObject(v unsafe.Pointer) (s *mspan, x unsafe.Pointer, n uintptr) {
        c := gomcache()
        c.local_nlookup++
-       if ptrSize == 4 && c.local_nlookup >= 1<<30 {
+       if sys.PtrSize == 4 && c.local_nlookup >= 1<<30 {
                // purge cache stats to prevent overflow
                lock(&mheap_.lock)
                purgecachedstats(c)
@@ -394,7 +395,7 @@ func findObject(v unsafe.Pointer) (s *mspan, x unsafe.Pointer, n uintptr) {
        }
        p := uintptr(v) >> pageShift
        q := p - arena_start>>pageShift
-       s = *(**mspan)(add(unsafe.Pointer(mheap_.spans), q*ptrSize))
+       s = *(**mspan)(add(unsafe.Pointer(mheap_.spans), q*sys.PtrSize))
        if s == nil {
                return
        }
index b49452a3ea271946823002b532031a09a6210ed3..e60355083d3c51849bb22a4081a17567ca5f8791 100644 (file)
@@ -122,6 +122,7 @@ package runtime
 
 import (
        "runtime/internal/atomic"
+       "runtime/internal/sys"
        "unsafe"
 )
 
@@ -370,7 +371,7 @@ type gcControllerState struct {
 	// at the end of each cycle.
        triggerRatio float64
 
-       _ [_CacheLineSize]byte
+       _ [sys.CacheLineSize]byte
 
        // fractionalMarkWorkersNeeded is the number of fractional
        // mark workers that need to be started. This is either 0 or
@@ -378,7 +379,7 @@ type gcControllerState struct {
        // scheduling point (hence it gets its own cache line).
        fractionalMarkWorkersNeeded int64
 
-       _ [_CacheLineSize]byte
+       _ [sys.CacheLineSize]byte
 }
 
 // startCycle resets the GC controller's state and computes estimates
@@ -730,9 +731,9 @@ const gcAssistTimeSlack = 5000
 const gcOverAssistBytes = 1 << 20
 
 var work struct {
-       full  uint64                // lock-free list of full blocks workbuf
-       empty uint64                // lock-free list of empty blocks workbuf
-       pad0  [_CacheLineSize]uint8 // prevents false-sharing between full/empty and nproc/nwait
+       full  uint64                   // lock-free list of full blocks workbuf
+       empty uint64                   // lock-free list of empty blocks workbuf
+       pad0  [sys.CacheLineSize]uint8 // prevents false-sharing between full/empty and nproc/nwait
 
        markrootNext uint32 // next markroot job
        markrootJobs uint32 // number of markroot jobs
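
The pad fields switched to [sys.CacheLineSize]byte above (gcControllerState, the work struct, and later mheap.central, semtable and parforthread) keep independently-updated words on separate cache lines, avoiding false sharing between Ps. A self-contained illustration of the layout effect, with an assumed 64-byte line:

    package main

    import (
        "fmt"
        "unsafe"
    )

    const cacheLineSize = 64 // assumed here; runtime/internal/sys carries the real per-arch value

    // Each counter gets a whole cache line, so concurrent writers on
    // different elements do not invalidate each other's lines.
    type paddedCounter struct {
        n uint64
        _ [cacheLineSize - unsafe.Sizeof(uint64(0))]byte
    }

    func main() {
        var c [4]paddedCounter
        fmt.Println("element stride:", unsafe.Sizeof(c[0])) // 64
    }
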
index c038dea04a9ae040e4def1f6a2f800396265728f..455ee34ec239ff8fa697d05693fc498a3acf5dd0 100644 (file)
@@ -8,6 +8,7 @@ package runtime
 
 import (
        "runtime/internal/atomic"
+       "runtime/internal/sys"
        "unsafe"
 )
 
@@ -201,7 +202,7 @@ func markroot(i uint32) {
 //
 //go:nowritebarrier
 func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) {
-       if rootBlockBytes%(8*ptrSize) != 0 {
+       if rootBlockBytes%(8*sys.PtrSize) != 0 {
                // This is necessary to pick byte offsets in ptrmask0.
                throw("rootBlockBytes must be a multiple of 8*ptrSize")
        }
@@ -210,7 +211,7 @@ func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) {
        if b >= b0+n0 {
                return
        }
-       ptrmask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/(8*ptrSize))))
+       ptrmask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/(8*sys.PtrSize))))
        n := uintptr(rootBlockBytes)
        if b+n > b0+n0 {
                n = b0 + n0 - b
@@ -300,7 +301,7 @@ func markrootSpans(gcw *gcWork, shard int) {
                        scanobject(p, gcw)
 
                        // The special itself is a root.
-                       scanblock(uintptr(unsafe.Pointer(&spf.fn)), ptrSize, &oneptrmask[0], gcw)
+                       scanblock(uintptr(unsafe.Pointer(&spf.fn)), sys.PtrSize, &oneptrmask[0], gcw)
                }
 
                unlock(&s.speciallock)
@@ -704,11 +705,11 @@ func scanframeworker(frame *stkframe, cache *pcvalueCache, gcw *gcWork) {
        // Scan local variables if stack frame has been allocated.
        size := frame.varp - frame.sp
        var minsize uintptr
-       switch thechar {
+       switch sys.TheChar {
        case '7':
-               minsize = spAlign
+               minsize = sys.SpAlign
        default:
-               minsize = minFrameSize
+               minsize = sys.MinFrameSize
        }
        if size > minsize {
                stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
@@ -724,7 +725,7 @@ func scanframeworker(frame *stkframe, cache *pcvalueCache, gcw *gcWork) {
                        throw("scanframe: bad symbol table")
                }
                bv := stackmapdata(stkmap, pcdata)
-               size = uintptr(bv.n) * ptrSize
+               size = uintptr(bv.n) * sys.PtrSize
                scanblock(frame.varp-size, size, bv.bytedata, gcw)
        }
 
@@ -746,7 +747,7 @@ func scanframeworker(frame *stkframe, cache *pcvalueCache, gcw *gcWork) {
                        }
                        bv = stackmapdata(stkmap, pcdata)
                }
-               scanblock(frame.argp, uintptr(bv.n)*ptrSize, bv.bytedata, gcw)
+               scanblock(frame.argp, uintptr(bv.n)*sys.PtrSize, bv.bytedata, gcw)
        }
 }
 
@@ -912,9 +913,9 @@ func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork) {
 
        for i := uintptr(0); i < n; {
                // Find bits for the next word.
-               bits := uint32(*addb(ptrmask, i/(ptrSize*8)))
+               bits := uint32(*addb(ptrmask, i/(sys.PtrSize*8)))
                if bits == 0 {
-                       i += ptrSize * 8
+                       i += sys.PtrSize * 8
                        continue
                }
                for j := 0; j < 8 && i < n; j++ {
@@ -928,7 +929,7 @@ func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork) {
                                }
                        }
                        bits >>= 1
-                       i += ptrSize
+                       i += sys.PtrSize
                }
        }
 }
@@ -962,7 +963,7 @@ func scanobject(b uintptr, gcw *gcWork) {
        }
 
        var i uintptr
-       for i = 0; i < n; i += ptrSize {
+       for i = 0; i < n; i += sys.PtrSize {
                // Find bits for this word.
                if i != 0 {
                        // Avoid needless hbits.next() on last iteration.
@@ -973,7 +974,7 @@ func scanobject(b uintptr, gcw *gcWork) {
                // are pointers, or else they'd be merged with other non-pointer
                // data into larger allocations.
                bits := hbits.bits()
-               if i >= 2*ptrSize && bits&bitMarked == 0 {
+               if i >= 2*sys.PtrSize && bits&bitMarked == 0 {
                        break // no more pointers in this object
                }
                if bits&bitPointer == 0 {
@@ -1019,7 +1020,7 @@ func shade(b uintptr) {
 //go:nowritebarrier
 func greyobject(obj, base, off uintptr, hbits heapBits, span *mspan, gcw *gcWork) {
        // obj should be start of allocation, and so must be at least pointer-aligned.
-       if obj&(ptrSize-1) != 0 {
+       if obj&(sys.PtrSize-1) != 0 {
                throw("greyobject: obj not pointer-aligned")
        }
 
@@ -1087,11 +1088,11 @@ func gcDumpObject(label string, obj, off uintptr) {
        }
        print(" s.start*_PageSize=", hex(s.start*_PageSize), " s.limit=", hex(s.limit), " s.sizeclass=", s.sizeclass, " s.elemsize=", s.elemsize, "\n")
        skipped := false
-       for i := uintptr(0); i < s.elemsize; i += ptrSize {
+       for i := uintptr(0); i < s.elemsize; i += sys.PtrSize {
                // For big objects, just print the beginning (because
                // that usually hints at the object's type) and the
                // fields around off.
-               if !(i < 128*ptrSize || off-16*ptrSize < i && i < off+16*ptrSize) {
+               if !(i < 128*sys.PtrSize || off-16*sys.PtrSize < i && i < off+16*sys.PtrSize) {
                        skipped = true
                        continue
                }
index e670689f1e1e375f8cb1693e94baddb37cca8627..22f51dbc1ab97be452e2255f73c39f6bdcdc6611 100644 (file)
@@ -8,6 +8,7 @@ package runtime
 
 import (
        "runtime/internal/atomic"
+       "runtime/internal/sys"
        "unsafe"
 )
 
@@ -280,10 +281,10 @@ func (s *mspan) sweep(preserve bool) bool {
                        freeToHeap = true
                } else {
                        // Free small object.
-                       if size > 2*ptrSize {
-                               *(*uintptr)(unsafe.Pointer(p + ptrSize)) = uintptrMask & 0xdeaddeaddeaddead // mark as "needs to be zeroed"
-                       } else if size > ptrSize {
-                               *(*uintptr)(unsafe.Pointer(p + ptrSize)) = 0
+                       if size > 2*sys.PtrSize {
+                               *(*uintptr)(unsafe.Pointer(p + sys.PtrSize)) = uintptrMask & 0xdeaddeaddeaddead // mark as "needs to be zeroed"
+                       } else if size > sys.PtrSize {
+                               *(*uintptr)(unsafe.Pointer(p + sys.PtrSize)) = 0
                        }
                        if head.ptr() == nil {
                                head = gclinkptr(p)
index 3654778c945e616b1e2f8cc1605743970abeda70..0a0285d8163ca9f0225f8de06a7961213f1dc748 100644 (file)
@@ -6,6 +6,7 @@ package runtime
 
 import (
        "runtime/internal/atomic"
+       "runtime/internal/sys"
        "unsafe"
 )
 
@@ -265,7 +266,7 @@ type workbufhdr struct {
 type workbuf struct {
        workbufhdr
        // account for the above fields
-       obj [(_WorkbufSize - unsafe.Sizeof(workbufhdr{})) / ptrSize]uintptr
+       obj [(_WorkbufSize - unsafe.Sizeof(workbufhdr{})) / sys.PtrSize]uintptr
 }
 
 // workbuf factory routines. These funcs are used to manage the
@@ -343,7 +344,7 @@ func getempty(entry int) *workbuf {
                }
        }
        if b == nil {
-               b = (*workbuf)(persistentalloc(unsafe.Sizeof(*b), _CacheLineSize, &memstats.gc_sys))
+               b = (*workbuf)(persistentalloc(unsafe.Sizeof(*b), sys.CacheLineSize, &memstats.gc_sys))
        }
        b.logget(entry)
        return b
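
The workbuf hunk above sizes the obj array so that header plus payload fill exactly _WorkbufSize, entirely at compile time: subtract the header's size and divide by the pointer size. The same arithmetic with made-up numbers:

    package main

    import (
        "fmt"
        "unsafe"
    )

    const bufSize = 2048 // illustrative; not the runtime's _WorkbufSize

    type hdr struct {
        next uintptr
        cnt  int32
        _    int32
    }

    type workbuf struct {
        hdr
        // obj soaks up whatever room the header leaves, in pointer-sized slots.
        obj [(bufSize - unsafe.Sizeof(hdr{})) / unsafe.Sizeof(uintptr(0))]uintptr
    }

    func main() {
        fmt.Println(unsafe.Sizeof(workbuf{})) // 2048 on 32- and 64-bit alike
    }
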
index 2feba436f44887920b4fa114e57ee9507be89ac5..d04297cc809d6c1ed458c1b04b8c2fabc24f0a55 100644 (file)
@@ -10,6 +10,7 @@ package runtime
 
 import (
        "runtime/internal/atomic"
+       "runtime/internal/sys"
        "unsafe"
 )
 
@@ -58,7 +59,7 @@ type mheap struct {
        // gets its own cache line.
        central [_NumSizeClasses]struct {
                mcentral mcentral
-               pad      [_CacheLineSize]byte
+               pad      [sys.CacheLineSize]byte
        }
 
        spanalloc             fixalloc // allocator for span*
@@ -169,13 +170,13 @@ func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
        h := (*mheap)(vh)
        s := (*mspan)(p)
        if len(h_allspans) >= cap(h_allspans) {
-               n := 64 * 1024 / ptrSize
+               n := 64 * 1024 / sys.PtrSize
                if n < cap(h_allspans)*3/2 {
                        n = cap(h_allspans) * 3 / 2
                }
                var new []*mspan
                sp := (*slice)(unsafe.Pointer(&new))
-               sp.array = sysAlloc(uintptr(n)*ptrSize, &memstats.other_sys)
+               sp.array = sysAlloc(uintptr(n)*sys.PtrSize, &memstats.other_sys)
                if sp.array == nil {
                        throw("runtime: cannot allocate memory")
                }
@@ -186,7 +187,7 @@ func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
                        // Don't free the old array if it's referenced by sweep.
                        // See the comment in mgc.go.
                        if h.allspans != mheap_.gcspans {
-                               sysFree(unsafe.Pointer(h.allspans), uintptr(cap(h_allspans))*ptrSize, &memstats.other_sys)
+                               sysFree(unsafe.Pointer(h.allspans), uintptr(cap(h_allspans))*sys.PtrSize, &memstats.other_sys)
                        }
                }
                h_allspans = new
@@ -239,7 +240,7 @@ func mlookup(v uintptr, base *uintptr, size *uintptr, sp **mspan) int32 {
        _g_ := getg()
 
        _g_.m.mcache.local_nlookup++
-       if ptrSize == 4 && _g_.m.mcache.local_nlookup >= 1<<30 {
+       if sys.PtrSize == 4 && _g_.m.mcache.local_nlookup >= 1<<30 {
                // purge cache stats to prevent overflow
                lock(&mheap_.lock)
                purgecachedstats(_g_.m.mcache)
@@ -305,8 +306,8 @@ func (h *mheap) init(spans_size uintptr) {
 
        sp := (*slice)(unsafe.Pointer(&h_spans))
        sp.array = unsafe.Pointer(h.spans)
-       sp.len = int(spans_size / ptrSize)
-       sp.cap = int(spans_size / ptrSize)
+       sp.len = int(spans_size / sys.PtrSize)
+       sp.cap = int(spans_size / sys.PtrSize)
 }
 
 // mHeap_MapSpans makes sure that the spans are mapped
@@ -321,8 +322,8 @@ func (h *mheap) mapSpans(arena_used uintptr) {
        // Map spans array, PageSize at a time.
        n := arena_used
        n -= h.arena_start
-       n = n / _PageSize * ptrSize
-       n = round(n, _PhysPageSize)
+       n = n / _PageSize * sys.PtrSize
+       n = round(n, sys.PhysPageSize)
        if h.spans_mapped >= n {
                return
        }
@@ -797,7 +798,7 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
                        h.spanalloc.free(unsafe.Pointer(t))
                }
        }
-       if (p+s.npages)*ptrSize < h.spans_mapped {
+       if (p+s.npages)*sys.PtrSize < h.spans_mapped {
                t := h_spans[p+s.npages]
                if t != nil && t.state == _MSpanFree {
                        s.npages += t.npages
@@ -829,7 +830,7 @@ func (h *mheap) busyList(npages uintptr) *mSpanList {
 }
 
 func scavengelist(list *mSpanList, now, limit uint64) uintptr {
-       if _PhysPageSize > _PageSize {
+       if sys.PhysPageSize > _PageSize {
                // golang.org/issue/9993
                // If the physical page size of the machine is larger than
                // our logical heap page size the kernel may round up the
@@ -1098,7 +1099,7 @@ func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *p
                        scanobject(uintptr(base), gcw)
                        // Mark the finalizer itself, since the
                        // special isn't part of the GC'd heap.
-                       scanblock(uintptr(unsafe.Pointer(&s.fn)), ptrSize, &oneptrmask[0], gcw)
+                       scanblock(uintptr(unsafe.Pointer(&s.fn)), sys.PtrSize, &oneptrmask[0], gcw)
                        if gcBlackenPromptly {
                                gcw.dispose()
                        }
index 74226904dceba01f43ec50db474bf8517eb923b6..2db01da375e14cdd8334b9a07cc49cabdd6545b4 100644 (file)
@@ -8,6 +8,7 @@ package runtime
 
 import (
        "runtime/internal/atomic"
+       "runtime/internal/sys"
        "unsafe"
 )
 
@@ -369,7 +370,7 @@ func purgecachedstats(c *mcache) {
 // overflow errors.
 //go:nosplit
 func mSysStatInc(sysStat *uint64, n uintptr) {
-       if _BigEndian != 0 {
+       if sys.BigEndian != 0 {
                atomic.Xadd64(sysStat, int64(n))
                return
        }
@@ -383,7 +384,7 @@ func mSysStatInc(sysStat *uint64, n uintptr) {
 // mSysStatInc apply.
 //go:nosplit
 func mSysStatDec(sysStat *uint64, n uintptr) {
-       if _BigEndian != 0 {
+       if sys.BigEndian != 0 {
                atomic.Xadd64(sysStat, -int64(n))
                return
        }
index 9a27d2adf954575bba78b2b79476269c3c1909d8..964e09ed0a26e486be50c801a6d983ad304d5ded 100644 (file)
 
 package runtime
 
-import "unsafe"
+import (
+       "runtime/internal/sys"
+       "unsafe"
+)
 
 const debugStackBarrier = false
 
@@ -170,9 +173,9 @@ func gcInstallStackBarrier(gp *g, frame *stkframe) bool {
        if usesLR {
                lrUintptr = frame.sp
        } else {
-               lrUintptr = frame.fp - regSize
+               lrUintptr = frame.fp - sys.RegSize
        }
-       lrPtr := (*uintreg)(unsafe.Pointer(lrUintptr))
+       lrPtr := (*sys.Uintreg)(unsafe.Pointer(lrUintptr))
        if debugStackBarrier {
                print("install stack barrier at ", hex(lrUintptr), " over ", hex(*lrPtr), ", goid=", gp.goid, "\n")
                if uintptr(*lrPtr) != frame.lr {
@@ -185,7 +188,7 @@ func gcInstallStackBarrier(gp *g, frame *stkframe) bool {
        stkbar := &gp.stkbar[len(gp.stkbar)-1]
        stkbar.savedLRPtr = lrUintptr
        stkbar.savedLRVal = uintptr(*lrPtr)
-       *lrPtr = uintreg(stackBarrierPC)
+       *lrPtr = sys.Uintreg(stackBarrierPC)
        return true
 }
 
@@ -218,8 +221,8 @@ func gcRemoveStackBarrier(gp *g, stkbar stkbar) {
        if debugStackBarrier {
                print("remove stack barrier at ", hex(stkbar.savedLRPtr), " with ", hex(stkbar.savedLRVal), ", goid=", gp.goid, "\n")
        }
-       lrPtr := (*uintreg)(unsafe.Pointer(stkbar.savedLRPtr))
-       if val := *lrPtr; val != uintreg(stackBarrierPC) {
+       lrPtr := (*sys.Uintreg)(unsafe.Pointer(stkbar.savedLRPtr))
+       if val := *lrPtr; val != sys.Uintreg(stackBarrierPC) {
                printlock()
                print("at *", hex(stkbar.savedLRPtr), " expected stack barrier PC ", hex(stackBarrierPC), ", found ", hex(val), ", goid=", gp.goid, "\n")
                print("gp.stkbar=")
@@ -227,7 +230,7 @@ func gcRemoveStackBarrier(gp *g, stkbar stkbar) {
                print(", gp.stkbarPos=", gp.stkbarPos, ", gp.stack=[", hex(gp.stack.lo), ",", hex(gp.stack.hi), ")\n")
                throw("stack barrier lost")
        }
-       *lrPtr = uintreg(stkbar.savedLRVal)
+       *lrPtr = sys.Uintreg(stkbar.savedLRVal)
 }
 
 // gcPrintStkbars prints a []stkbar for debugging.
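
The mstkbar.go hunks above now write the patched slot through sys.Uintreg, but the mechanism is unchanged: overwrite a frame's saved return address (or LR) with stackBarrierPC, remember the original in gp.stkbar, then verify and restore it on removal. A pure-Go simulation of just that bookkeeping, with no real stacks and entirely illustrative names:

    package main

    import "fmt"

    const stackBarrierPC = 0xdeadbeef // stand-in for the barrier routine's entry PC

    type stkbar struct {
        slot     int     // index of the patched slot (a real address in the runtime)
        savedVal uintptr // the original return address
    }

    func main() {
        // Pretend these are saved return addresses in a goroutine's frames.
        frames := []uintptr{0x401000, 0x402000, 0x403000}
        var barriers []stkbar

        // Install: patch each slot, remembering the old value.
        for i, lr := range frames {
            barriers = append(barriers, stkbar{slot: i, savedVal: lr})
            frames[i] = stackBarrierPC
        }

        // Remove: verify the slot still holds the barrier, then restore it.
        for _, b := range barriers {
            if frames[b.slot] != stackBarrierPC {
                panic("stack barrier lost")
            }
            frames[b.slot] = b.savedVal
        }
        fmt.Println(frames) // originals restored
    }
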
index f3519f3490fdd786449be57cc787a69eae2a0b28..faf27f411cf33af31e89e0d7e965d61771159fa4 100644 (file)
@@ -4,7 +4,10 @@
 
 package runtime
 
-import "unsafe"
+import (
+       "runtime/internal/sys"
+       "unsafe"
+)
 
 // From FreeBSD's <sys/sysctl.h>
 const (
@@ -133,7 +136,7 @@ func minit() {
 
        // m.procid is a uint64, but thr_new writes a uint32 on 32-bit systems.
        // Fix it up. (Only matters on big-endian, but be clean anyway.)
-       if ptrSize == 4 {
+       if sys.PtrSize == 4 {
                _g_.m.procid = uint64(*(*uint32)(unsafe.Pointer(&_g_.m.procid)))
        }
 
index 1d21d3b5aa72d8f9cf23d7e5b7f624dbea8d4245..d59ca3915efa8773440deb32495c295ec9d1852b 100644 (file)
@@ -4,7 +4,10 @@
 
 package runtime
 
-import "unsafe"
+import (
+       "runtime/internal/sys"
+       "unsafe"
+)
 
 // Linux futex.
 //
@@ -44,7 +47,7 @@ func futexsleep(addr *uint32, val uint32, ns int64) {
        // But on real 64-bit systems, where words are larger but the stack limit
        // is not, even timediv is too heavy, and we really need to use just an
        // ordinary machine instruction.
-       if ptrSize == 8 {
+       if sys.PtrSize == 8 {
                ts.set_sec(ns / 1000000000)
                ts.set_nsec(int32(ns % 1000000000))
        } else {
@@ -81,10 +84,10 @@ func getproccount() int32 {
        // buffers, but we don't have a dynamic memory allocator at the
        // moment, so that's a bit tricky and seems like overkill.
        const maxCPUs = 64 * 1024
-       var buf [maxCPUs / (ptrSize * 8)]uintptr
+       var buf [maxCPUs / (sys.PtrSize * 8)]uintptr
        r := sched_getaffinity(0, unsafe.Sizeof(buf), &buf[0])
        n := int32(0)
-       for _, v := range buf[:r/ptrSize] {
+       for _, v := range buf[:r/sys.PtrSize] {
                for v != 0 {
                        n += int32(v & 1)
                        v >>= 1
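
getproccount above copies the affinity mask into pointer-sized words and counts set bits with a shift-and-mask loop. The same counting in isolation (math/bits.OnesCount would also do; the loop mirrors the runtime's):

    package main

    import "fmt"

    const ptrSize = 8 // 64-bit assumed for the sketch

    func countCPUs(buf []uintptr, nbytes uintptr) int32 {
        n := int32(0)
        for _, v := range buf[:nbytes/ptrSize] {
            for v != 0 {
                n += int32(v & 1)
                v >>= 1
            }
        }
        return n
    }

    func main() {
        // Mask with CPUs 0-3 and 16 available.
        mask := []uintptr{0x1000f}
        fmt.Println(countCPUs(mask, ptrSize)) // 5
    }
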
index 43918bb0546be69d97212b9d3bc1aeeab914c61c..f660cc72a7606a8f94b1f9354c5f6b4db87c03d8 100644 (file)
@@ -4,7 +4,10 @@
 
 package runtime
 
-import "unsafe"
+import (
+       "runtime/internal/sys"
+       "unsafe"
+)
 
 // May run during STW, so write barriers are not allowed.
 //go:nowritebarrier
@@ -69,11 +72,11 @@ func sighandler(_ureg *ureg, note *byte, gp *g) int {
                // to sigpanic instead. (Otherwise the trace will end at
                // sigpanic and we won't get to see who faulted).
                if pc != 0 {
-                       if regSize > ptrSize {
-                               sp -= ptrSize
+                       if sys.RegSize > sys.PtrSize {
+                               sp -= sys.PtrSize
                                *(*uintptr)(unsafe.Pointer(sp)) = 0
                        }
-                       sp -= ptrSize
+                       sp -= sys.PtrSize
                        *(*uintptr)(unsafe.Pointer(sp)) = pc
                        c.setsp(sp)
                }
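
The Plan 9 handler above (and the 386/amd64 Unix handlers later in this diff) all perform the same stack edit before diverting execution to sigpanic: decrement sp by sys.PtrSize (plus a zero word where registers are wider than pointers) and store the faulting PC there, so the traceback still shows the frame that actually faulted. A toy model of that edit on a slice standing in for the stack:

    package main

    import "fmt"

    func main() {
        // A fake stack of pointer-sized words; sp is an index growing downward.
        var stack [8]uintptr
        sp := len(stack)
        pc := uintptr(0x457f23) // pretend this is the PC that faulted

        // Push pc so it becomes sigpanic's apparent return address.
        sp--
        stack[sp] = pc

        fmt.Printf("stack[%d] = %#x\n", sp, stack[sp])
    }
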
index e2120da905875bafa67c3f291894f46068ca2a06..3577a2406b9741d5224ea68bbe7bab5e5d4e161b 100644 (file)
@@ -4,7 +4,10 @@
 
 package runtime
 
-import "unsafe"
+import (
+       "runtime/internal/sys"
+       "unsafe"
+)
 
 const (
        _AT_NULL    = 0
@@ -21,7 +24,7 @@ func sysargs(argc int32, argv **byte) {
                n++
        }
        n++
-       auxv := (*[1 << 28]uint32)(add(unsafe.Pointer(argv), uintptr(n)*ptrSize))
+       auxv := (*[1 << 28]uint32)(add(unsafe.Pointer(argv), uintptr(n)*sys.PtrSize))
 
        for i := 0; auxv[i] != _AT_NULL; i += 2 {
                switch auxv[i] {
index 3749640ee52182c125ea3b5035bab64c3ff3194c..8fdfb585ba6e03be084c8db432d484791712098f 100644 (file)
@@ -4,7 +4,10 @@
 
 package runtime
 
-import "unsafe"
+import (
+       "runtime/internal/sys"
+       "unsafe"
+)
 
 const (
        _AT_NULL     = 0
@@ -40,7 +43,7 @@ func sysargs(argc int32, argv **byte) {
                n++
        }
        n++
-       auxv := (*[1 << 28]uint32)(add(unsafe.Pointer(argv), uintptr(n)*ptrSize))
+       auxv := (*[1 << 28]uint32)(add(unsafe.Pointer(argv), uintptr(n)*sys.PtrSize))
 
        for i := 0; auxv[i] != _AT_NULL; i += 2 {
                switch auxv[i] {
index 2db43bd424aeccde85ec813b7bf698e356cfdbe6..9e11cb3e12cac91d8f9b64e8814197dd763e75ee 100644 (file)
@@ -6,7 +6,10 @@
 
 package runtime
 
-import "runtime/internal/atomic"
+import (
+       "runtime/internal/atomic"
+       "runtime/internal/sys"
+)
 
 // A parfor holds state for the parallel for operation.
 type parfor struct {
@@ -38,7 +41,7 @@ type parforthread struct {
        nprocyield uint64
        nosyield   uint64
        nsleep     uint64
-       pad        [_CacheLineSize]byte
+       pad        [sys.CacheLineSize]byte
 }
 
 func parforalloc(nthrmax uint32) *parfor {
index 74158a4423796f6b2e5fa1e9ec1e25f80ccda6b9..11ecdae5ae4b35ee9632c3631aafd92e2e3adfd0 100644 (file)
@@ -6,6 +6,7 @@ package runtime
 
 import (
        "runtime/internal/atomic"
+       "runtime/internal/sys"
        "unsafe"
 )
 
@@ -55,7 +56,7 @@ func main() {
        // Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
        // Using decimal instead of binary GB and MB because
        // they look nicer in the stack overflow failure message.
-       if ptrSize == 8 {
+       if sys.PtrSize == 8 {
                maxstacksize = 1000000000
        } else {
                maxstacksize = 250000000
@@ -306,7 +307,7 @@ func releaseSudog(s *sudog) {
 // It assumes that f is a func value. Otherwise the behavior is undefined.
 //go:nosplit
 func funcPC(f interface{}) uintptr {
-       return **(**uintptr)(add(unsafe.Pointer(&f), ptrSize))
+       return **(**uintptr)(add(unsafe.Pointer(&f), sys.PtrSize))
 }
 
 // called from assembly
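
funcPC above leans on two representation details: an interface value is two words (type, data), and for a func value the data word points at a funcval whose first word is the code address. The trick can be reproduced outside the runtime, though it should be read as an illustration of the layout rather than something to depend on:

    package main

    import (
        "fmt"
        "unsafe"
    )

    func hello() {}

    // funcPC mirrors the runtime helper: skip the interface's type word,
    // follow the data word to the funcval, read its first word (the PC).
    func funcPC(f interface{}) uintptr {
        return **(**uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&f)) + unsafe.Sizeof(uintptr(0))))
    }

    func main() {
        fmt.Printf("entry PC of hello: %#x\n", funcPC(hello))
    }
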
@@ -393,10 +394,10 @@ func schedinit() {
                throw("unknown runnable goroutine during bootstrap")
        }
 
-       if buildVersion == "" {
+       if sys.BuildVersion == "" {
                // Condition should never trigger.  This code just serves
                // to ensure runtime·buildVersion is kept in the resulting binary.
-               buildVersion = "unknown"
+               sys.BuildVersion = "unknown"
        }
 }
 
@@ -999,7 +1000,7 @@ func mstart() {
                // Cgo may have left stack size in stack.hi.
                size := _g_.stack.hi
                if size == 0 {
-                       size = 8192 * stackGuardMultiplier
+                       size = 8192 * sys.StackGuardMultiplier
                }
                _g_.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
                _g_.stack.lo = _g_.stack.hi - size + 1024
@@ -1202,7 +1203,7 @@ func allocm(_p_ *p, fn func()) *m {
        if iscgo || GOOS == "solaris" || GOOS == "windows" || GOOS == "plan9" {
                mp.g0 = malg(-1)
        } else {
-               mp.g0 = malg(8192 * stackGuardMultiplier)
+               mp.g0 = malg(8192 * sys.StackGuardMultiplier)
        }
        mp.g0.m = mp
 
@@ -1305,9 +1306,9 @@ func newextram() {
        // the goroutine stack ends.
        mp := allocm(nil, nil)
        gp := malg(4096)
-       gp.sched.pc = funcPC(goexit) + _PCQuantum
+       gp.sched.pc = funcPC(goexit) + sys.PCQuantum
        gp.sched.sp = gp.stack.hi
-       gp.sched.sp -= 4 * regSize // extra space in case of reads slightly beyond frame
+       gp.sched.sp -= 4 * sys.RegSize // extra space in case of reads slightly beyond frame
        gp.sched.lr = 0
        gp.sched.g = guintptr(unsafe.Pointer(gp))
        gp.syscallpc = gp.sched.pc
@@ -2535,7 +2536,7 @@ func malg(stacksize int32) *g {
 // copied if a stack split occurred.
 //go:nosplit
 func newproc(siz int32, fn *funcval) {
-       argp := add(unsafe.Pointer(&fn), ptrSize)
+       argp := add(unsafe.Pointer(&fn), sys.PtrSize)
        pc := getcallerpc(unsafe.Pointer(&siz))
        systemstack(func() {
                newproc1(fn, (*uint8)(argp), siz, 0, pc)
@@ -2561,7 +2562,7 @@ func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc uintptr
        // Not worth it: this is almost always an error.
        // 4*sizeof(uintreg): extra space added below
        // sizeof(uintreg): caller's LR (arm) or return address (x86, in gostartcall).
-       if siz >= _StackMin-4*regSize-regSize {
+       if siz >= _StackMin-4*sys.RegSize-sys.RegSize {
                throw("newproc: function arguments too large for new goroutine")
        }
 
@@ -2580,21 +2581,21 @@ func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc uintptr
                throw("newproc1: new g is not Gdead")
        }
 
-       totalSize := 4*regSize + uintptr(siz) + minFrameSize // extra space in case of reads slightly beyond frame
-       totalSize += -totalSize & (spAlign - 1)              // align to spAlign
+       totalSize := 4*sys.RegSize + uintptr(siz) + sys.MinFrameSize // extra space in case of reads slightly beyond frame
+       totalSize += -totalSize & (sys.SpAlign - 1)                  // align to spAlign
        sp := newg.stack.hi - totalSize
        spArg := sp
        if usesLR {
                // caller's LR
                *(*unsafe.Pointer)(unsafe.Pointer(sp)) = nil
-               spArg += minFrameSize
+               spArg += sys.MinFrameSize
        }
        memmove(unsafe.Pointer(spArg), unsafe.Pointer(argp), uintptr(narg))
 
        memclr(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
        newg.sched.sp = sp
        newg.stktopsp = sp
-       newg.sched.pc = funcPC(goexit) + _PCQuantum // +PCQuantum so that previous instruction is in same function
+       newg.sched.pc = funcPC(goexit) + sys.PCQuantum // +PCQuantum so that previous instruction is in same function
        newg.sched.g = guintptr(unsafe.Pointer(newg))
        gostartcallfn(&newg.sched, fn)
        newg.gopc = callerpc
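
newproc1 above aligns the new frame with totalSize += -totalSize & (sys.SpAlign - 1): for a power-of-two alignment a, -n & (a-1) is exactly the amount needed to reach the next multiple of a. Demonstrated in isolation:

    package main

    import "fmt"

    // alignUp rounds n up to a multiple of a; a must be a power of two.
    func alignUp(n, a uintptr) uintptr { return n + (-n & (a - 1)) }

    func main() {
        for _, n := range []uintptr{0, 1, 15, 16, 17, 100} {
            fmt.Printf("%3d -> %3d\n", n, alignUp(n, 16))
        }
    }
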
@@ -2928,13 +2929,13 @@ func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
                        n = 2
                        // "ExternalCode" is better than "etext".
                        if pc > firstmoduledata.etext {
-                               pc = funcPC(_ExternalCode) + _PCQuantum
+                               pc = funcPC(_ExternalCode) + sys.PCQuantum
                        }
                        stk[0] = pc
                        if mp.preemptoff != "" || mp.helpgc != 0 {
-                               stk[1] = funcPC(_GC) + _PCQuantum
+                               stk[1] = funcPC(_GC) + sys.PCQuantum
                        } else {
-                               stk[1] = funcPC(_System) + _PCQuantum
+                               stk[1] = funcPC(_System) + sys.PCQuantum
                        }
                }
        }
@@ -3981,7 +3982,7 @@ func setMaxThreads(in int) (out int) {
 }
 
 func haveexperiment(name string) bool {
-       x := goexperiment
+       x := sys.Goexperiment
        for x != "" {
                xname := ""
                i := index(x, ",")
index 9a468443fd5f851609776f4bc76cefbc86cb0695..4c0242350cc561cf838159f1f971617e6166bcd0 100644 (file)
@@ -6,6 +6,7 @@ package runtime
 
 import (
        "runtime/internal/atomic"
+       "runtime/internal/sys"
        "unsafe"
 )
 
@@ -52,7 +53,7 @@ var (
 // nosplit for use in linux/386 startup linux_setup_vdso
 //go:nosplit
 func argv_index(argv **byte, i int32) *byte {
-       return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*ptrSize))
+       return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*sys.PtrSize))
 }
 
 func args(c int32, v **byte) {
@@ -192,10 +193,10 @@ func check() {
        if unsafe.Sizeof(j) != 8 {
                throw("bad j")
        }
-       if unsafe.Sizeof(k) != ptrSize {
+       if unsafe.Sizeof(k) != sys.PtrSize {
                throw("bad k")
        }
-       if unsafe.Sizeof(l) != ptrSize {
+       if unsafe.Sizeof(l) != sys.PtrSize {
                throw("bad l")
        }
        if unsafe.Sizeof(x1) != 1 {
@@ -238,7 +239,7 @@ func check() {
        }
 
        k = unsafe.Pointer(uintptr(0xfedcb123))
-       if ptrSize == 8 {
+       if sys.PtrSize == 8 {
                k = unsafe.Pointer(uintptr(unsafe.Pointer(k)) << 10)
        }
        if casp(&k, nil, nil) {
index aafb8cf3cdb5eac90f6980499860ef4679b012dd..9ec0d1545edde90152924ace7631f491c28daf44 100644 (file)
@@ -6,6 +6,7 @@ package runtime
 
 import (
        "runtime/internal/atomic"
+       "runtime/internal/sys"
        "unsafe"
 )
 
@@ -48,14 +49,6 @@ const (
        _Pdead
 )
 
-// The next line makes 'go generate' write the zgen_*.go files with
-// per-OS and per-arch information, including constants
-// named goos_$GOOS and goarch_$GOARCH for every
-// known GOOS and GOARCH. The constant is 1 on the
-// current system, 0 otherwise; multiplying by them is
-// useful for defining GOOS- or GOARCH-specific constants.
-//go:generate go run gengoos.go
-
 type mutex struct {
        // Futex-based impl treats it as uint32 key,
        // while sema-based impl as M* waitm.
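
The go:generate comment removed above describes the goos_*/goarch_* constants that are 1 for the current platform and 0 for every other; after this CL they are generated into runtime/internal/sys as sys.Goos*/sys.Goarch*, and the _StackSystem line in the stack.go hunk further down is the canonical consumer. A sketch of the pattern with hand-written values (the real constants are generated per build):

    package main

    import "fmt"

    // Stand-ins for the generated constants: exactly one per group is 1 on
    // a given build (all zero here, as on a linux/amd64-style target).
    const (
        goosWindows = 0
        goosPlan9   = 0
        goosDarwin  = 0
        goarchArm   = 0
        ptrSize     = 8
    )

    // Per-OS stack reservation written as plain arithmetic instead of
    // per-OS source files (mirrors _StackSystem in stack.go).
    const stackSystem = goosWindows*512*ptrSize + goosPlan9*512 + goosDarwin*goarchArm*1024

    func main() {
        fmt.Println("stackSystem =", stackSystem) // 0 on this assumed target
    }
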
@@ -151,7 +144,7 @@ type gobuf struct {
        pc   uintptr
        g    guintptr
        ctxt unsafe.Pointer // this has to be a pointer so that gc scans it
-       ret  uintreg
+       ret  sys.Uintreg
        lr   uintptr
        bp   uintptr // for GOEXPERIMENT=framepointer
 }
@@ -533,7 +526,7 @@ type forcegcstate struct {
  * known to compiler
  */
 const (
-       _Structrnd = regSize
+       _Structrnd = sys.RegSize
 )
 
 // startup_random_data holds random bytes initialized at startup.  These come from
@@ -553,7 +546,7 @@ func extendRandom(r []byte, n int) {
                        w = 16
                }
                h := memhash(unsafe.Pointer(&r[n-w]), uintptr(nanotime()), uintptr(w))
-               for i := 0; i < ptrSize && n < len(r); i++ {
+               for i := 0; i < sys.PtrSize && n < len(r); i++ {
                        r[n] = byte(h)
                        n++
                        h >>= 8
index 508a19b630508c7ac89a0bb210bc8748cd7049e7..b6c3fea001db858c2b17621c6d280ab56a2988f3 100644 (file)
@@ -6,7 +6,10 @@ package runtime
 
 // This file contains the implementation of Go select statements.
 
-import "unsafe"
+import (
+       "runtime/internal/sys"
+       "unsafe"
+)
 
 const (
        debugSelect = false
@@ -51,7 +54,7 @@ func selectsize(size uintptr) uintptr {
                (size-1)*unsafe.Sizeof(hselect{}.scase[0]) +
                size*unsafe.Sizeof(*hselect{}.lockorder) +
                size*unsafe.Sizeof(*hselect{}.pollorder)
-       return round(selsize, _Int64Align)
+       return round(selsize, sys.Int64Align)
 }
 
 func newselect(sel *hselect, selsize int64, size int32) {
index d9bf4c1cfd5631bf5ce05f70287cc2dc8f6b1f8a..b54621bad8445965442ede6f8adf3fd727dc5498 100644 (file)
@@ -21,6 +21,7 @@ package runtime
 
 import (
        "runtime/internal/atomic"
+       "runtime/internal/sys"
        "unsafe"
 )
 
@@ -38,7 +39,7 @@ const semTabSize = 251
 
 var semtable [semTabSize]struct {
        root semaRoot
-       pad  [_CacheLineSize - unsafe.Sizeof(semaRoot{})]byte
+       pad  [sys.CacheLineSize - unsafe.Sizeof(semaRoot{})]byte
 }
 
 //go:linkname sync_runtime_Semacquire sync.runtime_Semacquire
index 8cabbc20e21efcb220a2f153b3a75987df919d16..3a8b5ba4607863737bacb164f75bc78b1c30db47 100644 (file)
@@ -6,6 +6,8 @@
 
 package runtime
 
+import "runtime/internal/sys"
+
 const (
        _SIG_DFL uintptr = 0
        _SIG_IGN uintptr = 1
@@ -185,7 +187,7 @@ func crash() {
                // this means the OS X core file will be >128 GB and even on a zippy
                // workstation can take OS X well over an hour to write (uninterruptible).
                // Save users from making that mistake.
-               if ptrSize == 8 {
+               if sys.PtrSize == 8 {
                        return
                }
        }
index 04218f97ea32df02b3fb3554ae5dfae25cb87749..90d69ee389acac0051926b29a6936a7c85c7d608 100644 (file)
@@ -6,7 +6,10 @@
 
 package runtime
 
-import "unsafe"
+import (
+       "runtime/internal/sys"
+       "unsafe"
+)
 
 func dumpregs(c *sigctxt) {
        print("eax    ", hex(c.eax()), "\n")
@@ -85,11 +88,11 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
                // (Otherwise the trace will end at runtime.sigpanic and we
                // won't get to see who faulted.)
                if pc != 0 {
-                       if regSize > ptrSize {
-                               sp -= ptrSize
+                       if sys.RegSize > sys.PtrSize {
+                               sp -= sys.PtrSize
                                *(*uintptr)(unsafe.Pointer(sp)) = 0
                        }
-                       sp -= ptrSize
+                       sp -= sys.PtrSize
                        *(*uintptr)(unsafe.Pointer(sp)) = pc
                        c.set_esp(uint32(sp))
                }
index 473f762918846fa3916a40a976629835ffa964e4..df317e3835cc06efb10c56fc9eff34b0df18f35d 100644 (file)
@@ -8,6 +8,7 @@
 package runtime
 
 import (
+       "runtime/internal/sys"
        "unsafe"
 )
 
@@ -119,11 +120,11 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
                // (Otherwise the trace will end at runtime.sigpanic and we
                // won't get to see who faulted.)
                if pc != 0 {
-                       if regSize > ptrSize {
-                               sp -= ptrSize
+                       if sys.RegSize > sys.PtrSize {
+                               sp -= sys.PtrSize
                                *(*uintptr)(unsafe.Pointer(sp)) = 0
                        }
-                       sp -= ptrSize
+                       sp -= sys.PtrSize
                        *(*uintptr)(unsafe.Pointer(sp)) = pc
                        c.set_rsp(uint64(sp))
                }
index 18ecdc29ce3d4319989d874bb8e54a5ce09129d2..96a4cb3dac7e036344698650a24dd44f88393ed5 100644 (file)
@@ -6,7 +6,10 @@
 
 package runtime
 
-import "unsafe"
+import (
+       "runtime/internal/sys"
+       "unsafe"
+)
 
 func dumpregs(c *sigctxt) {
        print("r0      ", hex(c.r0()), "\n")
@@ -78,7 +81,7 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
                // functions are correctly handled. This smashes
                // the stack frame but we're not going back there
                // anyway.
-               sp := c.sp() - spAlign // needs only sizeof uint64, but must align the stack
+               sp := c.sp() - sys.SpAlign // needs only sizeof uint64, but must align the stack
                c.set_sp(sp)
                *(*uint64)(unsafe.Pointer(uintptr(sp))) = c.lr()
 
index 085f66e8984cfe12a4d844bcde734c39c9f9898f..45074f9d905afd8070b43405693677f6cc0ddd03 100644 (file)
@@ -4,7 +4,10 @@
 
 package runtime
 
-import "unsafe"
+import (
+       "runtime/internal/sys"
+       "unsafe"
+)
 
 type sigctxt struct {
        info *siginfo
@@ -32,5 +35,5 @@ func (c *sigctxt) set_eip(x uint32)     { c.regs().eip = x }
 func (c *sigctxt) set_esp(x uint32)     { c.regs().esp = x }
 func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
 func (c *sigctxt) set_sigaddr(x uint32) {
-       *(*uintptr)(add(unsafe.Pointer(c.info), 2*ptrSize)) = uintptr(x)
+       *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
 }
index 5e339b8a46fe1718c2c8d6d7a02a3e1dc89e50e2..b8b38ccca9cf92011e055e77a126d85ee7a5cda5 100644 (file)
@@ -4,7 +4,10 @@
 
 package runtime
 
-import "unsafe"
+import (
+       "runtime/internal/sys"
+       "unsafe"
+)
 
 type sigctxt struct {
        info *siginfo
@@ -42,5 +45,5 @@ func (c *sigctxt) set_rip(x uint64)     { c.regs().rip = x }
 func (c *sigctxt) set_rsp(x uint64)     { c.regs().rsp = x }
 func (c *sigctxt) set_sigcode(x uint64) { c.info.si_code = int32(x) }
 func (c *sigctxt) set_sigaddr(x uint64) {
-       *(*uintptr)(add(unsafe.Pointer(c.info), 2*ptrSize)) = uintptr(x)
+       *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
 }
index bdb4314fa8dd27e0aea5ce73359416bd9ab73a0c..469f47ca7fbf1392ae464b05e3a83fae8d772120 100644 (file)
@@ -4,7 +4,10 @@
 
 package runtime
 
-import "unsafe"
+import (
+       "runtime/internal/sys"
+       "unsafe"
+)
 
 type sigctxt struct {
        info *siginfo
@@ -44,5 +47,5 @@ func (c *sigctxt) set_r10(x uint32) { c.regs().r10 = x }
 
 func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
 func (c *sigctxt) set_sigaddr(x uint32) {
-       *(*uintptr)(add(unsafe.Pointer(c.info), 2*ptrSize)) = uintptr(x)
+       *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
 }
index 7d8b0104251c6aee18770fb6e5ed3feb2fe55951..465fc4ffa377e5e27519792d5a0c8a1344bf51fd 100644 (file)
@@ -4,7 +4,10 @@
 
 package runtime
 
-import "unsafe"
+import (
+       "runtime/internal/sys"
+       "unsafe"
+)
 
 type sigctxt struct {
        info *siginfo
@@ -57,5 +60,5 @@ func (c *sigctxt) set_lr(x uint64)  { c.regs().regs[30] = x }
 func (c *sigctxt) set_r28(x uint64) { c.regs().regs[28] = x }
 
 func (c *sigctxt) set_sigaddr(x uint64) {
-       *(*uintptr)(add(unsafe.Pointer(c.info), 2*ptrSize)) = uintptr(x)
+       *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
 }
index da3afc9e9e535b6ed01780163369d4800648493d..5445201b19b64889d0029f8d075bb28ad65526b0 100644 (file)
@@ -7,7 +7,10 @@
 
 package runtime
 
-import "unsafe"
+import (
+       "runtime/internal/sys"
+       "unsafe"
+)
 
 type sigctxt struct {
        info *siginfo
@@ -67,5 +70,5 @@ func (c *sigctxt) set_link(x uint64) { c.regs().link = x }
 
 func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
 func (c *sigctxt) set_sigaddr(x uint64) {
-       *(*uintptr)(add(unsafe.Pointer(c.info), 2*ptrSize)) = uintptr(x)
+       *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
 }
index e1e690efc7cf56a1e55e20d68ff2b15984c7d779..774aa42c7717bd52f55ca627ef78539af8ac9595 100644 (file)
@@ -7,7 +7,10 @@
 
 package runtime
 
-import "unsafe"
+import (
+       "runtime/internal/sys"
+       "unsafe"
+)
 
 func dumpregs(c *sigctxt) {
        print("r0   ", hex(c.r0()), "\t")
@@ -82,7 +85,7 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
                // functions are correctly handled. This smashes
                // the stack frame but we're not going back there
                // anyway.
-               sp := c.sp() - minFrameSize
+               sp := c.sp() - sys.MinFrameSize
                c.set_sp(sp)
                *(*uint64)(unsafe.Pointer(uintptr(sp))) = c.link()
 
index f217564aa9afab1c1a14b2b1919c4afff6a6e7bb..db25636885f285835f79715613ab92d6c77c5c2a 100644 (file)
@@ -6,6 +6,7 @@ package runtime
 
 import (
        "runtime/internal/atomic"
+       "runtime/internal/sys"
        "unsafe"
 )
 
@@ -64,7 +65,7 @@ const (
        // to each stack below the usual guard area for OS-specific
        // purposes like signal handling. Used on Windows, Plan 9,
        // and Darwin/ARM because they do not use a separate stack.
-       _StackSystem = goos_windows*512*ptrSize + goos_plan9*512 + goos_darwin*goarch_arm*1024
+       _StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosDarwin*sys.GoarchArm*1024
 
        // The minimum size of stack used by Go code
        _StackMin = 2048
@@ -89,7 +90,7 @@ const (
 
        // The stack guard is a pointer this many bytes above the
        // bottom of the stack.
-       _StackGuard = 640*stackGuardMultiplier + _StackSystem
+       _StackGuard = 640*sys.StackGuardMultiplier + _StackSystem
 
        // After a stack split check the SP is allowed to be this
        // many bytes below the stack guard.  This saves an instruction
@@ -125,7 +126,7 @@ const (
 )
 
 const (
-       uintptrMask = 1<<(8*ptrSize) - 1
+       uintptrMask = 1<<(8*sys.PtrSize) - 1
        poisonStack = uintptrMask & 0x6868686868686868
 
        // Goroutine preemption request.
@@ -536,10 +537,10 @@ func adjustpointers(scanp unsafe.Pointer, cbv *bitvector, adjinfo *adjustinfo, f
        num := uintptr(bv.n)
        for i := uintptr(0); i < num; i++ {
                if stackDebug >= 4 {
-                       print("        ", add(scanp, i*ptrSize), ":", ptrnames[ptrbit(&bv, i)], ":", hex(*(*uintptr)(add(scanp, i*ptrSize))), " # ", i, " ", bv.bytedata[i/8], "\n")
+                       print("        ", add(scanp, i*sys.PtrSize), ":", ptrnames[ptrbit(&bv, i)], ":", hex(*(*uintptr)(add(scanp, i*sys.PtrSize))), " # ", i, " ", bv.bytedata[i/8], "\n")
                }
                if ptrbit(&bv, i) == 1 {
-                       pp := (*uintptr)(add(scanp, i*ptrSize))
+                       pp := (*uintptr)(add(scanp, i*sys.PtrSize))
                        p := *pp
                        if f != nil && 0 < p && p < _PageSize && debug.invalidptr != 0 || p == poisonStack {
                                // Looks like a junk value in a pointer slot.
@@ -587,11 +588,11 @@ func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
        // Adjust local variables if stack frame has been allocated.
        size := frame.varp - frame.sp
        var minsize uintptr
-       switch thechar {
+       switch sys.TheChar {
        case '7':
-               minsize = spAlign
+               minsize = sys.SpAlign
        default:
-               minsize = minFrameSize
+               minsize = sys.MinFrameSize
        }
        if size > minsize {
                var bv bitvector
@@ -607,15 +608,15 @@ func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
                        throw("bad symbol table")
                }
                bv = stackmapdata(stackmap, pcdata)
-               size = uintptr(bv.n) * ptrSize
+               size = uintptr(bv.n) * sys.PtrSize
                if stackDebug >= 3 {
-                       print("      locals ", pcdata, "/", stackmap.n, " ", size/ptrSize, " words ", bv.bytedata, "\n")
+                       print("      locals ", pcdata, "/", stackmap.n, " ", size/sys.PtrSize, " words ", bv.bytedata, "\n")
                }
                adjustpointers(unsafe.Pointer(frame.varp-size), &bv, adjinfo, f)
        }
 
        // Adjust saved base pointer if there is one.
-       if thechar == '6' && frame.argp-frame.varp == 2*regSize {
+       if sys.TheChar == '6' && frame.argp-frame.varp == 2*sys.RegSize {
                if !framepointer_enabled {
                        print("runtime: found space for saved base pointer, but no framepointer experiment\n")
                        print("argp=", hex(frame.argp), " varp=", hex(frame.varp), "\n")
@@ -841,9 +842,9 @@ func newstack() {
                throw("missing stack in newstack")
        }
        sp := gp.sched.sp
-       if thechar == '6' || thechar == '8' {
+       if sys.TheChar == '6' || sys.TheChar == '8' {
                // The call to morestack cost a word.
-               sp -= ptrSize
+               sp -= sys.PtrSize
        }
        if stackDebug >= 1 || sp < gp.stack.lo {
                print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
@@ -978,7 +979,7 @@ func shrinkstack(gp *g) {
        if gp.syscallsp != 0 {
                return
        }
-       if goos_windows != 0 && gp.m != nil && gp.m.libcallsp != 0 {
+       if sys.GoosWindows != 0 && gp.m != nil && gp.m.libcallsp != 0 {
                return
        }
 
index 58ea474c7015b23b3401241fdb6afb56aada2fea..9ead56e897329eb864135ed79d094b0bda802359 100644 (file)
@@ -6,12 +6,6 @@ package runtime
 
 import "unsafe"
 
-// Declarations for runtime services implemented in C or assembly.
-
-const ptrSize = 4 << (^uintptr(0) >> 63)             // unsafe.Sizeof(uintptr(0)) but an ideal const
-const regSize = 4 << (^uintreg(0) >> 63)             // unsafe.Sizeof(uintreg(0)) but an ideal const
-const spAlign = 1*(1-goarch_arm64) + 16*goarch_arm64 // SP alignment: 1 normally, 16 for ARM64
-
 // Should be a built-in for unsafe.Pointer?
 //go:nosplit
 func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
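
The declarations deleted from stubs.go above now live in runtime/internal/sys, but the trick behind them is worth spelling out: ^uintptr(0) >> 63 is 1 on a 64-bit target and 0 on a 32-bit one, so 4 << (^uintptr(0) >> 63) yields the pointer size as an untyped ("ideal") constant without importing unsafe. A standalone check:

    package main

    import (
        "fmt"
        "unsafe"
    )

    // Same expression as the removed ptrSize declaration: 8 on 64-bit
    // targets, 4 on 32-bit ones, usable wherever a constant is required.
    const ptrSize = 4 << (^uintptr(0) >> 63)

    func main() {
        fmt.Println(ptrSize == unsafe.Sizeof(uintptr(0))) // true on every platform
    }
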
index c3235fac036d131ae0e2b8be7f15fda4e3061a9d..00b0a850e0d4635239d1112f717243238bd6d2a9 100644 (file)
@@ -4,7 +4,10 @@
 
 package runtime
 
-import "unsafe"
+import (
+       "runtime/internal/sys"
+       "unsafe"
+)
 
 // NOTE: Func does not expose the actual unexported fields, because we return *Func
 // values to users, and we want to keep them from being able to overwrite the data
@@ -105,7 +108,7 @@ func moduledataverify1(datap *moduledata) {
        // and a byte giving the pointer width in bytes.
        pcln := *(**[8]byte)(unsafe.Pointer(&datap.pclntable))
        pcln32 := *(**[2]uint32)(unsafe.Pointer(&datap.pclntable))
-       if pcln32[0] != 0xfffffffb || pcln[4] != 0 || pcln[5] != 0 || pcln[6] != _PCQuantum || pcln[7] != ptrSize {
+       if pcln32[0] != 0xfffffffb || pcln[4] != 0 || pcln[5] != 0 || pcln[6] != sys.PCQuantum || pcln[7] != sys.PtrSize {
                println("runtime: function symbol table header:", hex(pcln32[0]), hex(pcln[4]), hex(pcln[5]), hex(pcln[6]), hex(pcln[7]))
                throw("invalid function symbol table\n")
        }
@@ -358,7 +361,7 @@ func funcline(f *_func, targetpc uintptr) (file string, line int32) {
 
 func funcspdelta(f *_func, targetpc uintptr, cache *pcvalueCache) int32 {
        x := pcvalue(f, f.pcsp, targetpc, cache, true)
-       if x&(ptrSize-1) != 0 {
+       if x&(sys.PtrSize-1) != 0 {
                print("invalid spdelta ", funcname(f), " ", hex(f.entry), " ", hex(targetpc), " ", hex(f.pcsp), " ", x, "\n")
        }
        return x
@@ -377,13 +380,13 @@ func funcdata(f *_func, i int32) unsafe.Pointer {
                return nil
        }
        p := add(unsafe.Pointer(&f.nfuncdata), unsafe.Sizeof(f.nfuncdata)+uintptr(f.npcdata)*4)
-       if ptrSize == 8 && uintptr(p)&4 != 0 {
+       if sys.PtrSize == 8 && uintptr(p)&4 != 0 {
                if uintptr(unsafe.Pointer(f))&4 != 0 {
                        println("runtime: misaligned func", f)
                }
                p = add(p, 4)
        }
-       return *(*unsafe.Pointer)(add(p, uintptr(i)*ptrSize))
+       return *(*unsafe.Pointer)(add(p, uintptr(i)*sys.PtrSize))
 }
 
 // step advances to the next pc, value pair in the encoded table.
@@ -399,7 +402,7 @@ func step(p []byte, pc *uintptr, val *int32, first bool) (newp []byte, ok bool)
        }
        vdelta := int32(uvdelta)
        p, pcdelta := readvarint(p)
-       *pc += uintptr(pcdelta * _PCQuantum)
+       *pc += uintptr(pcdelta * sys.PCQuantum)
        *val += vdelta
        return p, true
 }
index 3f0771fe0cfa7855f6f596e5b4c7694ed91269e8..137e706d244723e98d45dbcf0ad16ecbbab99f5c 100644 (file)
@@ -6,17 +6,20 @@
 
 package runtime
 
-import "unsafe"
+import (
+       "runtime/internal/sys"
+       "unsafe"
+)
 
 // adjust Gobuf as if it executed a call to fn with context ctxt
 // and then did an immediate gosave.
 func gostartcall(buf *gobuf, fn, ctxt unsafe.Pointer) {
        sp := buf.sp
-       if regSize > ptrSize {
-               sp -= ptrSize
+       if sys.RegSize > sys.PtrSize {
+               sp -= sys.PtrSize
                *(*uintptr)(unsafe.Pointer(sp)) = 0
        }
-       sp -= ptrSize
+       sp -= sys.PtrSize
        *(*uintptr)(unsafe.Pointer(sp)) = buf.pc
        buf.sp = sp
        buf.pc = uintptr(fn)
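For orientation, the hunk above is the whole trick behind starting a goroutine in fn: gostartcall pushes the goroutine's current resume PC onto its stack exactly where a CALL would have left a return address, then points buf.pc at fn, so fn "returns" into the original code when it finishes. The extra zero word is only stored where the register size exceeds the pointer size, in practice amd64p32. A toy illustration of the push, using a slice index in place of a real stack pointer; nothing here is runtime code:

package main

import "fmt"

// gobuf is a toy stand-in for the runtime's gobuf: sp indexes into a word
// slice that plays the role of the goroutine stack, pc is the resume address.
type gobuf struct {
	sp, pc uintptr
}

// gostartcallSketch mimics the hunk above: decrement sp by one word, store the
// old resume PC there as a fake return address, then retarget pc at fn.
func gostartcallSketch(buf *gobuf, stack []uintptr, fn uintptr) {
	buf.sp--               // sp -= sys.PtrSize in the real code
	stack[buf.sp] = buf.pc // *(*uintptr)(unsafe.Pointer(sp)) = buf.pc
	buf.pc = fn
}

func main() {
	stack := make([]uintptr, 8)
	buf := gobuf{sp: uintptr(len(stack)), pc: 0x1234}
	gostartcallSketch(&buf, stack, 0x5678)
	fmt.Printf("pc=%#x, fake return address=%#x\n", buf.pc, stack[buf.sp])
}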
index d85b0a985cbd6e05f336408beddfa83a487050e7..46ddb64374b6a733bb990dcd05d4e151d7406669 100644 (file)
@@ -14,6 +14,7 @@ package runtime
 
 import (
        "runtime/internal/atomic"
+       "runtime/internal/sys"
        "unsafe"
 )
 
@@ -69,7 +70,7 @@ const (
        // and ppc64le.
        // Tracing won't work reliably for architectures where cputicks is emulated
        // by nanotime, so the value doesn't matter for those architectures.
-       traceTickDiv = 16 + 48*(goarch_386|goarch_amd64|goarch_amd64p32)
+       traceTickDiv = 16 + 48*(sys.Goarch386|sys.GoarchAmd64|sys.GoarchAmd64p32)
        // Maximum number of PCs in a single stack trace.
        // Since events contain only stack id rather than whole stack trace,
        // we can allow quite large values here.
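The traceTickDiv change shows the naming scheme the new package uses for the old goarch_* selectors: each sys.Goarch* constant is 1 when compiling for that architecture and 0 otherwise, so OR-ing a few of them yields 0 or 1 and the whole expression folds to 16 or 64 at compile time, with no build tags and no runtime branch. A self-contained sketch of the idiom, with made-up local constants standing in for the generated values:

package main

import "fmt"

// One-hot architecture selectors: exactly one of these would be 1 for the
// target; the values below simply pretend we are building for amd64.
const (
	goarch386      = 0
	goarchAmd64    = 1
	goarchAmd64p32 = 0
)

// The OR of the selectors is 0 or 1, so the constant folds to 64 on the x86
// family and 16 everywhere else, mirroring traceTickDiv in the hunk above.
const traceTickDiv = 16 + 48*(goarch386|goarchAmd64|goarchAmd64p32)

func main() { fmt.Println(traceTickDiv) } // 64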
@@ -704,7 +705,7 @@ Search:
 
 // newStack allocates a new stack of size n.
 func (tab *traceStackTable) newStack(n int) *traceStack {
-       return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*ptrSize))
+       return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*sys.PtrSize))
 }
 
 // dump writes all previously cached stacks to trace buffers,
@@ -751,12 +752,12 @@ type traceAlloc struct {
 // traceAllocBlock is a block in traceAlloc.
 type traceAllocBlock struct {
        next *traceAllocBlock
-       data [64<<10 - ptrSize]byte
+       data [64<<10 - sys.PtrSize]byte
 }
 
 // alloc allocates an n-byte block.
 func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
-       n = round(n, ptrSize)
+       n = round(n, sys.PtrSize)
        if a.head == nil || a.off+n > uintptr(len(a.head.data)) {
                if n > uintptr(len(a.head.data)) {
                        throw("trace: alloc too large")
index 8b33e4a29e241ab4177f8762e00eba5614f375e0..56fbbeae11fb7e95ea41718afb4d0c5e481f10ae 100644 (file)
@@ -4,7 +4,10 @@
 
 package runtime
 
-import "unsafe"
+import (
+       "runtime/internal/sys"
+       "unsafe"
+)
 
 // The code in this file implements stack trace walking for all architectures.
 // The most important fact about a given architecture is whether it uses a link register.
@@ -29,7 +32,7 @@ import "unsafe"
 // usesLR is defined below in terms of minFrameSize, which is defined in
 // arch_$GOARCH.go. ptrSize and regSize are defined in stubs.go.
 
-const usesLR = minFrameSize > 0
+const usesLR = sys.MinFrameSize > 0
 
 var (
        // initialized in tracebackinit
@@ -181,8 +184,8 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
                        frame.pc = *(*uintptr)(unsafe.Pointer(frame.sp))
                        frame.lr = 0
                } else {
-                       frame.pc = uintptr(*(*uintreg)(unsafe.Pointer(frame.sp)))
-                       frame.sp += regSize
+                       frame.pc = uintptr(*(*sys.Uintreg)(unsafe.Pointer(frame.sp)))
+                       frame.sp += sys.RegSize
                }
        }
 
@@ -222,7 +225,7 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
                        frame.fp = sp + uintptr(funcspdelta(f, frame.pc, &cache))
                        if !usesLR {
                                // On x86, call instruction pushes return PC before entering new function.
-                               frame.fp += regSize
+                               frame.fp += sys.RegSize
                        }
                }
                var flr *_func
@@ -249,8 +252,8 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
                                }
                        } else {
                                if frame.lr == 0 {
-                                       lrPtr = frame.fp - regSize
-                                       frame.lr = uintptr(*(*uintreg)(unsafe.Pointer(lrPtr)))
+                                       lrPtr = frame.fp - sys.RegSize
+                                       frame.lr = uintptr(*(*sys.Uintreg)(unsafe.Pointer(lrPtr)))
                                }
                        }
                        if frame.lr == stackBarrierPC {
@@ -280,13 +283,13 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
                frame.varp = frame.fp
                if !usesLR {
                        // On x86, call instruction pushes return PC before entering new function.
-                       frame.varp -= regSize
+                       frame.varp -= sys.RegSize
                }
 
                // If framepointer_enabled and there's a frame, then
                // there's a saved bp here.
                if framepointer_enabled && GOARCH == "amd64" && frame.varp > frame.sp {
-                       frame.varp -= regSize
+                       frame.varp -= sys.RegSize
                }
 
                // Derive size of arguments.
@@ -296,7 +299,7 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
                // in package runtime and reflect, and for those we use call-specific
                // metadata recorded by f's caller.
                if callback != nil || printing {
-                       frame.argp = frame.fp + minFrameSize
+                       frame.argp = frame.fp + sys.MinFrameSize
                        setArgInfo(&frame, f, callback != nil)
                }
 
@@ -349,7 +352,7 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
                                }
                                print(funcname(f), "(")
                                argp := (*[100]uintptr)(unsafe.Pointer(frame.argp))
-                               for i := uintptr(0); i < frame.arglen/ptrSize; i++ {
+                               for i := uintptr(0); i < frame.arglen/sys.PtrSize; i++ {
                                        if i >= 10 {
                                                print(", ...")
                                                break
@@ -394,10 +397,10 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
                // before faking a call to sigpanic.
                if usesLR && waspanic {
                        x := *(*uintptr)(unsafe.Pointer(frame.sp))
-                       frame.sp += minFrameSize
+                       frame.sp += sys.MinFrameSize
                        if GOARCH == "arm64" {
                                // arm64 needs 16-byte aligned SP, always
-                               frame.sp += ptrSize
+                               frame.sp += sys.PtrSize
                        }
                        f = findfunc(frame.pc)
                        frame.fn = f
@@ -494,14 +497,14 @@ func setArgInfo(frame *stkframe, f *_func, needArgMap bool) {
                // Extract argument bitmaps for reflect stubs from the calls they made to reflect.
                switch funcname(f) {
                case "reflect.makeFuncStub", "reflect.methodValueCall":
-                       arg0 := frame.sp + minFrameSize
+                       arg0 := frame.sp + sys.MinFrameSize
                        fn := *(**[2]uintptr)(unsafe.Pointer(arg0))
                        if fn[0] != f.entry {
                                print("runtime: confused by ", funcname(f), "\n")
                                throw("reflect mismatch")
                        }
                        bv := (*bitvector)(unsafe.Pointer(fn[1]))
-                       frame.arglen = uintptr(bv.n * ptrSize)
+                       frame.arglen = uintptr(bv.n * sys.PtrSize)
                        frame.argmap = bv
                }
        }
@@ -515,7 +518,7 @@ func printcreatedby(gp *g) {
                print("created by ", funcname(f), "\n")
                tracepc := pc // back up to CALL instruction for funcline.
                if pc > f.entry {
-                       tracepc -= _PCQuantum
+                       tracepc -= sys.PCQuantum
                }
                file, line := funcline(f, tracepc)
                print("\t", file, ":", line)
index 244001590a02422ceda8b5eced7b01b67cca628f..38914bb2b9f0912b5caf76a03d03981eaecea2b0 100644 (file)
@@ -4,7 +4,10 @@
 
 package runtime
 
-import "unsafe"
+import (
+       "runtime/internal/sys"
+       "unsafe"
+)
 
 // Look up symbols in the Linux vDSO.
 
@@ -303,7 +306,7 @@ func sysargs(argc int32, argv **byte) {
        n++
 
        // now argv+n is auxv
-       auxv := (*[1 << 32]elf64Auxv)(add(unsafe.Pointer(argv), uintptr(n)*ptrSize))
+       auxv := (*[1 << 32]elf64Auxv)(add(unsafe.Pointer(argv), uintptr(n)*sys.PtrSize))
 
        for i := 0; auxv[i].a_type != _AT_NULL; i++ {
                av := &auxv[i]
diff --git a/src/runtime/zgoos_android.go b/src/runtime/zgoos_android.go
deleted file mode 100644 (file)
index 0590bd9..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-// generated by gengoos.go using 'go generate'
-
-package runtime
-
-const theGoos = `android`
-
-const goos_android = 1
-const goos_darwin = 0
-const goos_dragonfly = 0
-const goos_freebsd = 0
-const goos_linux = 0
-const goos_nacl = 0
-const goos_netbsd = 0
-const goos_openbsd = 0
-const goos_plan9 = 0
-const goos_solaris = 0
-const goos_windows = 0
diff --git a/src/runtime/zgoos_darwin.go b/src/runtime/zgoos_darwin.go
deleted file mode 100644 (file)
index c0a7cd6..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-// generated by gengoos.go using 'go generate'
-
-package runtime
-
-const theGoos = `darwin`
-
-const goos_android = 0
-const goos_darwin = 1
-const goos_dragonfly = 0
-const goos_freebsd = 0
-const goos_linux = 0
-const goos_nacl = 0
-const goos_netbsd = 0
-const goos_openbsd = 0
-const goos_plan9 = 0
-const goos_solaris = 0
-const goos_windows = 0
diff --git a/src/runtime/zgoos_dragonfly.go b/src/runtime/zgoos_dragonfly.go
deleted file mode 100644 (file)
index 008d6de..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-// generated by gengoos.go using 'go generate'
-
-package runtime
-
-const theGoos = `dragonfly`
-
-const goos_android = 0
-const goos_darwin = 0
-const goos_dragonfly = 1
-const goos_freebsd = 0
-const goos_linux = 0
-const goos_nacl = 0
-const goos_netbsd = 0
-const goos_openbsd = 0
-const goos_plan9 = 0
-const goos_solaris = 0
-const goos_windows = 0
diff --git a/src/runtime/zgoos_freebsd.go b/src/runtime/zgoos_freebsd.go
deleted file mode 100644 (file)
index 2478940..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-// generated by gengoos.go using 'go generate'
-
-package runtime
-
-const theGoos = `freebsd`
-
-const goos_android = 0
-const goos_darwin = 0
-const goos_dragonfly = 0
-const goos_freebsd = 1
-const goos_linux = 0
-const goos_nacl = 0
-const goos_netbsd = 0
-const goos_openbsd = 0
-const goos_plan9 = 0
-const goos_solaris = 0
-const goos_windows = 0
diff --git a/src/runtime/zgoos_linux.go b/src/runtime/zgoos_linux.go
deleted file mode 100644 (file)
index c775ab5..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-// generated by gengoos.go using 'go generate'
-
-// +build !android
-
-package runtime
-
-const theGoos = `linux`
-
-const goos_android = 0
-const goos_darwin = 0
-const goos_dragonfly = 0
-const goos_freebsd = 0
-const goos_linux = 1
-const goos_nacl = 0
-const goos_netbsd = 0
-const goos_openbsd = 0
-const goos_plan9 = 0
-const goos_solaris = 0
-const goos_windows = 0
diff --git a/src/runtime/zgoos_nacl.go b/src/runtime/zgoos_nacl.go
deleted file mode 100644 (file)
index d9d88f4..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-// generated by gengoos.go using 'go generate'
-
-package runtime
-
-const theGoos = `nacl`
-
-const goos_android = 0
-const goos_darwin = 0
-const goos_dragonfly = 0
-const goos_freebsd = 0
-const goos_linux = 0
-const goos_nacl = 1
-const goos_netbsd = 0
-const goos_openbsd = 0
-const goos_plan9 = 0
-const goos_solaris = 0
-const goos_windows = 0
diff --git a/src/runtime/zgoos_netbsd.go b/src/runtime/zgoos_netbsd.go
deleted file mode 100644 (file)
index ff2c5cb..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-// generated by gengoos.go using 'go generate'
-
-package runtime
-
-const theGoos = `netbsd`
-
-const goos_android = 0
-const goos_darwin = 0
-const goos_dragonfly = 0
-const goos_freebsd = 0
-const goos_linux = 0
-const goos_nacl = 0
-const goos_netbsd = 1
-const goos_openbsd = 0
-const goos_plan9 = 0
-const goos_solaris = 0
-const goos_windows = 0
diff --git a/src/runtime/zgoos_openbsd.go b/src/runtime/zgoos_openbsd.go
deleted file mode 100644 (file)
index b071dc6..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-// generated by gengoos.go using 'go generate'
-
-package runtime
-
-const theGoos = `openbsd`
-
-const goos_android = 0
-const goos_darwin = 0
-const goos_dragonfly = 0
-const goos_freebsd = 0
-const goos_linux = 0
-const goos_nacl = 0
-const goos_netbsd = 0
-const goos_openbsd = 1
-const goos_plan9 = 0
-const goos_solaris = 0
-const goos_windows = 0
diff --git a/src/runtime/zgoos_plan9.go b/src/runtime/zgoos_plan9.go
deleted file mode 100644 (file)
index 4306b0f..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-// generated by gengoos.go using 'go generate'
-
-package runtime
-
-const theGoos = `plan9`
-
-const goos_android = 0
-const goos_darwin = 0
-const goos_dragonfly = 0
-const goos_freebsd = 0
-const goos_linux = 0
-const goos_nacl = 0
-const goos_netbsd = 0
-const goos_openbsd = 0
-const goos_plan9 = 1
-const goos_solaris = 0
-const goos_windows = 0
diff --git a/src/runtime/zgoos_solaris.go b/src/runtime/zgoos_solaris.go
deleted file mode 100644 (file)
index 10f9537..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-// generated by gengoos.go using 'go generate'
-
-package runtime
-
-const theGoos = `solaris`
-
-const goos_android = 0
-const goos_darwin = 0
-const goos_dragonfly = 0
-const goos_freebsd = 0
-const goos_linux = 0
-const goos_nacl = 0
-const goos_netbsd = 0
-const goos_openbsd = 0
-const goos_plan9 = 0
-const goos_solaris = 1
-const goos_windows = 0
diff --git a/src/runtime/zgoos_windows.go b/src/runtime/zgoos_windows.go
deleted file mode 100644 (file)
index 56f5c58..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-// generated by gengoos.go using 'go generate'
-
-package runtime
-
-const theGoos = `windows`
-
-const goos_android = 0
-const goos_darwin = 0
-const goos_dragonfly = 0
-const goos_freebsd = 0
-const goos_linux = 0
-const goos_nacl = 0
-const goos_netbsd = 0
-const goos_openbsd = 0
-const goos_plan9 = 0
-const goos_solaris = 0
-const goos_windows = 1
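Every zgoos_*.go file deleted above follows the same generated shape: gengoos.go emits one file per OS whose one-hot goos_* constants let OS-specific values be selected in constant expressions, the same way the Goarch* selectors are used in the trace.go hunk earlier in this diff. Those selectors do not disappear; the generator and its output presumably move under runtime/internal/sys with exported names. The spellings below are an assumption by analogy with sys.Goarch* (this part of the diff does not show the regenerated files) and are included only to illustrate the shape:

// Hypothetical regenerated counterpart of the deleted zgoos_linux.go, now
// living in runtime/internal/sys with exported names. Illustrative only;
// the real generated names and layout are not visible in this hunk.

// +build !android

package sys

const TheGoos = `linux` // assumed exported spelling of the old theGoos

const GoosAndroid = 0
const GoosDarwin = 0
const GoosDragonfly = 0
const GoosFreebsd = 0
const GoosLinux = 1
const GoosNacl = 0
const GoosNetbsd = 0
const GoosOpenbsd = 0
const GoosPlan9 = 0
const GoosSolaris = 0
const GoosWindows = 0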