const ticksPerSecond = 1e7 // Windows timestamp resolution
ts := int64(attrBuf.uint64()) // ModTime since Windows epoch
- secs := int64(ts / ticksPerSecond)
- nsecs := (1e9 / ticksPerSecond) * int64(ts%ticksPerSecond)
+ secs := ts / ticksPerSecond
+ nsecs := (1e9 / ticksPerSecond) * (ts % ticksPerSecond)
epoch := time.Date(1601, time.January, 1, 0, 0, 0, 0, time.UTC)
modified = time.Unix(epoch.Unix()+secs, nsecs)
}
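For reference, the conversion above in self-contained form. Nothing here is new: ticksPerSecond and the 1601-01-01 epoch come straight from the hunk; Windows FILETIME-style timestamps count 100 ns ticks from that epoch.

package main

import (
	"fmt"
	"time"
)

// windowsTicksToTime mirrors the hunk: split the tick count into whole
// seconds and leftover 100 ns ticks, then offset from the 1601 epoch.
func windowsTicksToTime(ts int64) time.Time {
	const ticksPerSecond = 1e7
	secs := ts / ticksPerSecond
	nsecs := (1e9 / ticksPerSecond) * (ts % ticksPerSecond)
	epoch := time.Date(1601, time.January, 1, 0, 0, 0, 0, time.UTC)
	return time.Unix(epoch.Unix()+secs, nsecs)
}

func main() {
	fmt.Println(windowsTicksToTime(0).UTC()) // 1601-01-01 00:00:00 +0000 UTC
}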
p.typeData = append(p.typeData, allTypeData[to.offset:to.offset+to.length])
}
- for i := 1; i < int(exportedp1); i++ {
+ for i := 1; i < exportedp1; i++ {
p.parseSavedType(pkg, i, nil)
}
}
// then the algorithm runs a little faster.
// If sais_8_32 modifies tmp, it sets tmp[0] = -1 on return.
func sais_8_32(text []byte, textMax int, sa, tmp []int32) {
- if len(sa) != len(text) || len(tmp) < int(textMax) {
+ if len(sa) != len(text) || len(tmp) < textMax {
panic("suffixarray: misuse of sais_8_32")
}
func fnvUint64(h uint64, x uint64) uint64 {
for i := 0; i < 8; i++ {
- h ^= uint64(x & 0xFF)
+ h ^= x & 0xFF
x >>= 8
h *= prime64
}
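The loop mixes x into the hash one byte at a time, least-significant byte first. A runnable sketch, assuming the standard FNV-64 constants; the hunk shows neither prime64's value nor the function's return, so both are completions:

package main

import "fmt"

const (
	offset64 = 14695981039346656037 // standard FNV-64 offset basis (assumed)
	prime64  = 1099511628211        // standard FNV-64 prime (assumed)
)

func fnvUint64(h uint64, x uint64) uint64 {
	for i := 0; i < 8; i++ {
		h ^= x & 0xFF // mix in the low byte
		x >>= 8       // advance to the next byte
		h *= prime64
	}
	return h // return not shown in the hunk; assumed
}

func main() {
	fmt.Printf("%#x\n", fnvUint64(offset64, 42))
}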
case uint32:
vals[i] = uint32(m.mutateUInt(uint64(v), math.MaxUint32))
case uint64:
- vals[i] = m.mutateUInt(uint64(v), maxUint)
+ vals[i] = m.mutateUInt(v, maxUint)
case float32:
vals[i] = float32(m.mutateFloat(float64(v), math.MaxFloat32))
case float64:
if len(data) != 8 {
return 0, typ, errors.New("QWORD value is not 8 bytes long")
}
- return uint64(*(*uint64)(unsafe.Pointer(&data[0]))), QWORD, nil
+ return *(*uint64)(unsafe.Pointer(&data[0])), QWORD, nil
default:
return 0, typ, ErrUnexpectedType
}
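On the little-endian platforms the registry code runs on, the unsafe load above reads the 8 bytes as a little-endian uint64. An equivalent, bounds-checked sketch without unsafe, for illustration only:

import (
	"encoding/binary"
	"errors"
)

func qwordLE(data []byte) (uint64, error) {
	if len(data) != 8 {
		return 0, errors.New("QWORD value is not 8 bytes long")
	}
	// Same result as the pointer cast on little-endian machines.
	return binary.LittleEndian.Uint64(data), nil
}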
// some extra as a result of trying to find an aligned region.
//
// Divide it up and put it on the ready list.
- for i := uintptr(userArenaChunkBytes); i < size; i += userArenaChunkBytes {
+ for i := userArenaChunkBytes; i < size; i += userArenaChunkBytes {
s := h.allocMSpanLocked()
s.init(uintptr(v)+i, userArenaChunkPages)
h.userArena.readyList.insertBack(s)
dumpint(uint64(uintptr(unsafe.Pointer(gp))))
eface := efaceOf(&p.arg)
dumpint(uint64(uintptr(unsafe.Pointer(eface._type))))
- dumpint(uint64(uintptr(unsafe.Pointer(eface.data))))
+ dumpint(uint64(uintptr(eface.data)))
dumpint(0) // was p->defer, no longer recorded
dumpint(uint64(uintptr(unsafe.Pointer(p.link))))
}
//
// If this span was cached before sweep, then gcController.heapLive was totally
// recomputed since caching this span, so we don't do this for stale spans.
- dHeapLive -= int64(uintptr(s.nelems)-uintptr(s.allocCount)) * int64(s.elemsize)
+ dHeapLive -= int64(s.nelems-uintptr(s.allocCount)) * int64(s.elemsize)
}
// Release the span to the mcentral.
deps: makeStatDepSet(heapStatsDep),
compute: func(in *statAggregate, out *metricValue) {
hist := out.float64HistOrInit(sizeClassBuckets)
- hist.counts[len(hist.counts)-1] = uint64(in.heapStats.largeAllocCount)
+ hist.counts[len(hist.counts)-1] = in.heapStats.largeAllocCount
// Cut off the first index, which is ostensibly for size class 0,
// but large objects are tracked separately, so it's actually unused.
for i, count := range in.heapStats.smallAllocCount[1:] {
- hist.counts[i] = uint64(count)
+ hist.counts[i] = count
}
},
},
deps: makeStatDepSet(heapStatsDep),
compute: func(in *statAggregate, out *metricValue) {
hist := out.float64HistOrInit(sizeClassBuckets)
- hist.counts[len(hist.counts)-1] = uint64(in.heapStats.largeFreeCount)
+ hist.counts[len(hist.counts)-1] = in.heapStats.largeFreeCount
// Cut off the first index, which is ostensibly for size class 0,
// but large objects are tracked separately, so it's actually unused.
for i, count := range in.heapStats.smallFreeCount[1:] {
- hist.counts[i] = uint64(count)
+ hist.counts[i] = count
}
},
},
deps: makeStatDepSet(heapStatsDep),
compute: func(in *statAggregate, out *metricValue) {
out.kind = metricKindUint64
- out.scalar = uint64(in.heapStats.tinyAllocCount)
+ out.scalar = in.heapStats.tinyAllocCount
},
},
"/gc/limiter/last-enabled:gc-cycle": {
// compute populates the gcStatsAggregate with values from the runtime.
func (a *gcStatsAggregate) compute() {
a.heapScan = gcController.heapScan.Load()
- a.stackScan = uint64(gcController.lastStackScan.Load())
+ a.stackScan = gcController.lastStackScan.Load()
a.globalsScan = gcController.globalsScan.Load()
a.totalScan = a.heapScan + a.stackScan + a.globalsScan
}
// compute size needed for return parameters
nret := uintptr(0)
for _, t := range ft.OutSlice() {
- nret = alignUp(nret, uintptr(t.Align_)) + uintptr(t.Size_)
+ nret = alignUp(nret, uintptr(t.Align_)) + t.Size_
}
nret = alignUp(nret, goarch.PtrSize)
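The loop lays out each result at its own alignment and then rounds the total up to a pointer boundary. A worked sketch, using the runtime's definition of alignUp and hypothetical result types:

package main

import "fmt"

// alignUp rounds n up to a multiple of a (a power of two), as the
// runtime defines it.
func alignUp(n, a uintptr) uintptr {
	return (n + a - 1) &^ (a - 1)
}

func main() {
	// Hypothetical results (int8, int64, int32) on a 64-bit platform.
	sizes := []uintptr{1, 8, 4}
	aligns := []uintptr{1, 8, 4}
	nret := uintptr(0)
	for i := range sizes {
		nret = alignUp(nret, aligns[i]) + sizes[i]
	}
	nret = alignUp(nret, 8) // goarch.PtrSize
	fmt.Println(nret)       // 24: 1 byte, pad to 8, 8 bytes, 4 bytes, pad to 8
}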
// increase in RSS. By capping us at a point >0, we're essentially
// saying that we're OK using more CPU during the GC to prevent
// this growth in RSS.
- triggerLowerBound := uint64(((goal-c.heapMarked)/triggerRatioDen)*minTriggerRatioNum) + c.heapMarked
+ triggerLowerBound := ((goal-c.heapMarked)/triggerRatioDen)*minTriggerRatioNum + c.heapMarked
if minTrigger < triggerLowerBound {
minTrigger = triggerLowerBound
}
// to reflect the costs of a GC with no work to do. With a large heap but
// very little scan work to perform, this gives us exactly as much runway
// as we would need, in the worst case.
- maxTrigger := uint64(((goal-c.heapMarked)/triggerRatioDen)*maxTriggerRatioNum) + c.heapMarked
+ maxTrigger := ((goal-c.heapMarked)/triggerRatioDen)*maxTriggerRatioNum + c.heapMarked
if goal > defaultHeapMinimum && goal-defaultHeapMinimum > maxTrigger {
maxTrigger = goal - defaultHeapMinimum
}
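Both clamps scale the runway between heapMarked and the goal by a fixed num/den ratio. A worked sketch of the arithmetic; the ratio values here are an assumption (45/64 ≈ 0.70 and 61/64 ≈ 0.95), not taken from the hunk:

package main

import "fmt"

const (
	triggerRatioDen    = 64
	minTriggerRatioNum = 45 // assumed, ~0.70
	maxTriggerRatioNum = 61 // assumed, ~0.95
)

func main() {
	heapMarked := uint64(64 << 20) // 64 MiB live after the last mark
	goal := uint64(128 << 20)      // 128 MiB heap goal
	lo := ((goal-heapMarked)/triggerRatioDen)*minTriggerRatioNum + heapMarked
	hi := ((goal-heapMarked)/triggerRatioDen)*maxTriggerRatioNum + heapMarked
	fmt.Println(lo>>20, hi>>20) // 109 125: the trigger stays in [109 MiB, 125 MiB]
}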
// to include that huge page.
// Compute the huge page boundary above our candidate.
- pagesPerHugePage := uintptr(physHugePageSize / pageSize)
+ pagesPerHugePage := physHugePageSize / pageSize
hugePageAbove := uint(alignUp(uintptr(start), pagesPerHugePage))
// If that boundary is within our current candidate, then we may be breaking
// Starting from searchAddr's chunk, iterate until we find a chunk with pages to scavenge.
gen := s.gen
min := chunkIdx(s.minHeapIdx.Load())
- start := chunkIndex(uintptr(searchAddr))
+ start := chunkIndex(searchAddr)
// N.B. We'll never map the 0'th chunk, so minHeapIdx ensures this loop can't overflow.
for i := start; i >= min; i-- {
// Skip over chunks.
}
// We're still scavenging this chunk.
if i == start {
- return i, chunkPageIndex(uintptr(searchAddr))
+ return i, chunkPageIndex(searchAddr)
}
// Try to reduce searchAddr to newSearchAddr.
newSearchAddr := chunkBase(i) + pallocChunkBytes - pageSize
// newMarkBits returns a pointer to 8-byte-aligned bytes
// to be used for a span's mark bits.
func newMarkBits(nelems uintptr) *gcBits {
- blocksNeeded := uintptr((nelems + 63) / 64)
+ blocksNeeded := (nelems + 63) / 64
bytesNeeded := blocksNeeded * 8
// Try directly allocating from the current head arena.
result.next = nil
// If result.bits is not 8-byte aligned, adjust the index so
// that &result.bits[result.free] is 8-byte aligned.
- if uintptr(unsafe.Offsetof(gcBitsArena{}.bits))&7 == 0 {
+ if unsafe.Offsetof(gcBitsArena{}.bits)&7 == 0 {
result.free = 0
} else {
result.free = 8 - (uintptr(unsafe.Pointer(&result.bits[0])) & 7)
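Two details above are worth making concrete: one uint64 block holds 64 mark bits, so the byte count is ceil(nelems/64)*8; and when the bits array is misaligned, skipping 8-(addr&7) bytes lands the next allocation on an 8-byte boundary. A small stand-alone sketch:

// markBitsBytes mirrors the sizing above: round nelems up to whole
// 64-bit blocks. E.g. nelems=100 -> 2 blocks -> 16 bytes.
func markBitsBytes(nelems uintptr) uintptr {
	blocksNeeded := (nelems + 63) / 64
	return blocksNeeded * 8
}

// alignSkip mirrors the fix-up above: bytes to skip so that addr+skip
// is 8-byte aligned. E.g. addr&7 == 3 -> skip 5.
func alignSkip(addr uintptr) uintptr {
	if addr&7 == 0 {
		return 0
	}
	return 8 - (addr & 7)
}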
// lookup returns &s[idx].
func (s spanSetSpinePointer) lookup(idx uintptr) *atomic.Pointer[spanSetBlock] {
- return (*atomic.Pointer[spanSetBlock])(add(unsafe.Pointer(s.p), goarch.PtrSize*idx))
+ return (*atomic.Pointer[spanSetBlock])(add(s.p, goarch.PtrSize*idx))
}
// spanSetBlockPool is a global pool of spanSetBlocks.
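lookup is C-style array indexing: element idx lives at the spine base plus idx pointer-widths. The same computation with unsafe.Add (Go 1.17+), written as a hypothetical generic helper rather than the runtime's own:

import (
	"sync/atomic"
	"unsafe"
)

// lookupPtr returns a pointer to element idx of an array of
// atomic.Pointer[T] starting at base. Each element is pointer-sized,
// hence the unsafe.Sizeof(uintptr(0)) stride. Sketch only.
func lookupPtr[T any](base unsafe.Pointer, idx uintptr) *atomic.Pointer[T] {
	return (*atomic.Pointer[T])(unsafe.Add(base, unsafe.Sizeof(uintptr(0))*idx))
}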
var r uint32
var shift int
for {
- b := *(*uint8)((unsafe.Pointer(fd)))
+ b := *(*uint8)(fd)
fd = add(fd, unsafe.Sizeof(b))
if b < 128 {
return r + uint32(b)<<shift, fd
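This is a little-endian base-128 (varint) decoder walking raw memory: each byte contributes 7 bits, and a byte below 128 terminates the value. A safe, self-contained equivalent over a byte slice; the accumulating branch is inferred, since the hunk only shows the terminating one:

// readVarint decodes an unsigned little-endian base-128 value from b,
// returning the value and the number of bytes consumed (0 if truncated).
func readVarint(b []byte) (v uint32, n int) {
	var shift uint
	for i, c := range b {
		if c < 128 {
			return v + uint32(c)<<shift, i + 1 // final byte, no continuation bit
		}
		v += uint32(c&0x7f) << shift // strip the continuation bit
		shift += 7
	}
	return 0, 0 // truncated input
}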
systemstack(func() {
var limit uintptr
if d := gp._defer; d != nil {
- limit = uintptr(d.sp)
+ limit = d.sp
}
var u unwinder
// so there is no need for a deletion barrier on b.tags[wt].
wt := int(bw.tagCount() % uint32(len(b.tags)))
if tagPtr != nil {
- *(*uintptr)(unsafe.Pointer(&b.tags[wt])) = uintptr(unsafe.Pointer(*tagPtr))
+ *(*uintptr)(unsafe.Pointer(&b.tags[wt])) = uintptr(*tagPtr)
}
// Main record.
// Won the race, report overflow.
dst := b.overflowBuf
dst[0] = uint64(2 + b.hdrsize + 1)
- dst[1] = uint64(time)
+ dst[1] = time
for i := uintptr(0); i < b.hdrsize; i++ {
dst[2+i] = 0
}
if !retValid {
// argMap.n includes the results, but
// those aren't valid, so drop them.
- n := int32((uintptr(mv.argLen) &^ (goarch.PtrSize - 1)) / goarch.PtrSize)
+ n := int32((mv.argLen &^ (goarch.PtrSize - 1)) / goarch.PtrSize)
if n < argMap.n {
argMap.n = n
}
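The masking clears the low bits of argLen so the frame is counted in whole pointer words; the comparison against argMap.n then trims the map to just the argument words. In isolation, with ptrSize standing in for goarch.PtrSize:

// argWords mirrors the computation above. On 64-bit, argLen=28 rounds
// down to 24 bytes, i.e. 3 pointer words.
func argWords(argLen, ptrSize uintptr) int32 {
	return int32((argLen &^ (ptrSize - 1)) / ptrSize)
}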
func bool2int(x bool) int {
// Avoid branches. In the SSA compiler, this compiles to
// exactly what you would want it to.
- return int(uint8(*(*uint8)(unsafe.Pointer(&x))))
+ return int(*(*uint8)(unsafe.Pointer(&x)))
}
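This relies on two facts: a Go bool occupies one byte, and the compiler materializes exactly 0 or 1 in it, so reinterpreting that byte yields the integer directly with no branch. The plain version, for comparison:

// bool2intBranch is the obvious branching equivalent; the unsafe form
// above exists to guarantee branch-free code in the runtime.
func bool2intBranch(x bool) int {
	if x {
		return 1
	}
	return 0
}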
// abort crashes the runtime in situations where even throw might not
func updateTimerModifiedEarliest(pp *p, nextwhen int64) {
for {
old := pp.timerModifiedEarliest.Load()
- if old != 0 && int64(old) < nextwhen {
+ if old != 0 && old < nextwhen {
return
}
}
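The visible lines are the fast-path half of a compare-and-swap retry loop that only ever moves the timestamp earlier. A self-contained sketch of the whole pattern; the CAS tail is inferred, as the hunk cuts off after the early return:

import "sync/atomic"

// lowerTo stores nextwhen into v unless v already holds a nonzero,
// earlier value; it retries if another goroutine races the update.
func lowerTo(v *atomic.Int64, nextwhen int64) {
	for {
		old := v.Load()
		if old != 0 && old < nextwhen {
			return // already earlier; nothing to do
		}
		if v.CompareAndSwap(old, nextwhen) {
			return
		}
		// Lost a race; reload and try again.
	}
}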
stackID := trace.stackTab.put(buf.stk[:nstk])
- traceEventLocked(0, nil, 0, bufp, traceEvCPUSample, stackID, 1, uint64(timestamp), ppid, goid)
+ traceEventLocked(0, nil, 0, bufp, traceEvCPUSample, stackID, 1, timestamp, ppid, goid)
}
}
}
frame.pc = *(*uintptr)(unsafe.Pointer(frame.sp))
frame.lr = 0
} else {
- frame.pc = uintptr(*(*uintptr)(unsafe.Pointer(frame.sp)))
+ frame.pc = *(*uintptr)(unsafe.Pointer(frame.sp))
frame.sp += goarch.PtrSize
}
}
if fd1, _, err1 = RawSyscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(&psetgroups[0])), uintptr(O_WRONLY), 0, 0, 0); err1 != 0 {
goto childerror
}
- pid, _, err1 = RawSyscall(SYS_WRITE, uintptr(fd1), uintptr(unsafe.Pointer(&setgroups[0])), uintptr(len(setgroups)))
+ pid, _, err1 = RawSyscall(SYS_WRITE, fd1, uintptr(unsafe.Pointer(&setgroups[0])), uintptr(len(setgroups)))
if err1 != 0 {
goto childerror
}
- if _, _, err1 = RawSyscall(SYS_CLOSE, uintptr(fd1), 0, 0); err1 != 0 {
+ if _, _, err1 = RawSyscall(SYS_CLOSE, fd1, 0, 0); err1 != 0 {
goto childerror
}
if fd1, _, err1 = RawSyscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(&pgid[0])), uintptr(O_WRONLY), 0, 0, 0); err1 != 0 {
goto childerror
}
- pid, _, err1 = RawSyscall(SYS_WRITE, uintptr(fd1), uintptr(unsafe.Pointer(&gidmap[0])), uintptr(len(gidmap)))
+ pid, _, err1 = RawSyscall(SYS_WRITE, fd1, uintptr(unsafe.Pointer(&gidmap[0])), uintptr(len(gidmap)))
if err1 != 0 {
goto childerror
}
- if _, _, err1 = RawSyscall(SYS_CLOSE, uintptr(fd1), 0, 0); err1 != 0 {
+ if _, _, err1 = RawSyscall(SYS_CLOSE, fd1, 0, 0); err1 != 0 {
goto childerror
}
}
if fd1, _, err1 = RawSyscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(&puid[0])), uintptr(O_WRONLY), 0, 0, 0); err1 != 0 {
goto childerror
}
- pid, _, err1 = RawSyscall(SYS_WRITE, uintptr(fd1), uintptr(unsafe.Pointer(&uidmap[0])), uintptr(len(uidmap)))
+ pid, _, err1 = RawSyscall(SYS_WRITE, fd1, uintptr(unsafe.Pointer(&uidmap[0])), uintptr(len(uidmap)))
if err1 != 0 {
goto childerror
}
- if _, _, err1 = RawSyscall(SYS_CLOSE, uintptr(fd1), 0, 0); err1 != 0 {
+ if _, _, err1 = RawSyscall(SYS_CLOSE, fd1, 0, 0); err1 != 0 {
goto childerror
}
}
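All three sequences are the same shape: openat the /proc file relative to dirfd, write the whole buffer, close. Raw syscalls are mandatory here because this runs in the child between clone and exec. Outside that constraint, the equivalent is simply the following sketch (not usable in the child):

import "os"

// writeProcFile writes data to a /proc control file, e.g.
// "/proc/self/setgroups" or a uid_map/gid_map path (hypothetical caller).
func writeProcFile(path string, data []byte) error {
	f, err := os.OpenFile(path, os.O_WRONLY, 0)
	if err != nil {
		return err
	}
	if _, err := f.Write(data); err != nil {
		f.Close()
		return err
	}
	return f.Close()
}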
*(*uint16)(unsafe.Pointer(&b[6:8][0])) = rr.Header.Flags
*(*uint32)(unsafe.Pointer(&b[8:12][0])) = rr.Header.Seq
*(*uint32)(unsafe.Pointer(&b[12:16][0])) = rr.Header.Pid
- b[16] = byte(rr.Data.Family)
+ b[16] = rr.Data.Family
return b
}
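The pointer stores write each header field at its fixed offset in host byte order, which is what netlink expects. The same serialization without unsafe, assuming Go 1.21+ for binary.NativeEndian:

import "encoding/binary"

// putHeader mirrors the stores above: flags at offset 6, sequence at 8,
// pid at 12, address family at 16, all in native byte order.
func putHeader(b []byte, flags uint16, seq, pid uint32, family uint8) {
	binary.NativeEndian.PutUint16(b[6:8], flags)
	binary.NativeEndian.PutUint32(b[8:12], seq)
	binary.NativeEndian.PutUint32(b[12:16], pid)
	b[16] = family
}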