// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// ----------------------- //
//    bits.LeadingZeros    //
// ----------------------- //
// LeadingZeros checks that bits.LeadingZeros on a word-sized input
// compiles to a single count-leading-zeros style instruction.
func LeadingZeros(n uint) int {
	// amd64/v1,amd64/v2:"BSRQ"
	// amd64/v3:"LZCNTQ", -"BSRQ"
	// arm:"CLZ" arm64:"CLZ"
	return bits.LeadingZeros(n)
}

// LeadingZeros64 checks codegen for bits.LeadingZeros64.
func LeadingZeros64(n uint64) int {
	// amd64/v1,amd64/v2:"BSRQ"
	// amd64/v3:"LZCNTQ", -"BSRQ"
	// arm:"CLZ" arm64:"CLZ"
	return bits.LeadingZeros64(n)
}

// LeadingZeros32 checks codegen for bits.LeadingZeros32.
func LeadingZeros32(n uint32) int {
	// amd64/v1,amd64/v2:"BSRQ","LEAQ",-"CMOVQEQ"
	// amd64/v3: "LZCNTL",- "BSRL"
	// arm:"CLZ" arm64:"CLZW"
	return bits.LeadingZeros32(n)
}

// LeadingZeros16 checks codegen for bits.LeadingZeros16.
func LeadingZeros16(n uint16) int {
	// amd64/v1,amd64/v2:"BSRL","LEAL",-"CMOVQEQ"
	// amd64/v3: "LZCNTL",- "BSRL"
	// arm:"CLZ" arm64:"CLZ"
	return bits.LeadingZeros16(n)
}

// LeadingZeros8 checks codegen for bits.LeadingZeros8.
func LeadingZeros8(n uint8) int {
	// amd64/v1,amd64/v2:"BSRL","LEAL",-"CMOVQEQ"
	// amd64/v3: "LZCNTL",- "BSRL"
	// arm:"CLZ" arm64:"CLZ"
	return bits.LeadingZeros8(n)
}
// Len checks codegen for bits.Len (minimum number of bits to represent n).
func Len(n uint) int {
	// amd64/v1,amd64/v2:"BSRQ"
	// arm:"CLZ" arm64:"CLZ"
	return bits.Len(n)
}

// Len64 checks codegen for bits.Len64.
func Len64(n uint64) int {
	// amd64/v1,amd64/v2:"BSRQ"
	// arm:"CLZ" arm64:"CLZ"
	// ppc64le:"SUBC","CNTLZD"
	// ppc64:"SUBC","CNTLZD"
	return bits.Len64(n)
}

// SubFromLen64 checks that 64-bits.Len64(n) folds into a plain
// count-leading-zeros with no separate subtraction on ppc64.
func SubFromLen64(n uint64) int {
	// ppc64le:"CNTLZD",-"SUBC"
	// ppc64:"CNTLZD",-"SUBC"
	return 64 - bits.Len64(n)
}
// Len32 checks codegen for bits.Len32.
func Len32(n uint32) int {
	// amd64/v1,amd64/v2:"BSRQ","LEAQ",-"CMOVQEQ"
	// arm:"CLZ" arm64:"CLZ"
	return bits.Len32(n)
}

// Len16 checks codegen for bits.Len16.
func Len16(n uint16) int {
	// amd64/v1,amd64/v2:"BSRL","LEAL",-"CMOVQEQ"
	// amd64/v3: "LZCNTL"
	// arm:"CLZ" arm64:"CLZ"
	return bits.Len16(n)
}

// Len8 checks codegen for bits.Len8.
func Len8(n uint8) int {
	// amd64/v1,amd64/v2:"BSRL","LEAL",-"CMOVQEQ"
	// amd64/v3: "LZCNTL"
	// arm:"CLZ" arm64:"CLZ"
	return bits.Len8(n)
}
// -------------------- //
//    bits.OnesCount    //
// -------------------- //
// TODO(register args) Restore a m d 6 4 / v 1 :.*x86HasPOPCNT when only one ABI is tested.

// OnesCount checks that bits.OnesCount compiles to a population-count
// instruction without the runtime feature check.
func OnesCount(n uint) int {
	// amd64/v2:-".*x86HasPOPCNT" amd64/v3:-".*x86HasPOPCNT"
	// arm64:"VCNT","VUADDLV"
	return bits.OnesCount(n)
}

// OnesCount64 checks codegen for bits.OnesCount64.
func OnesCount64(n uint64) int {
	// amd64/v2:-".*x86HasPOPCNT" amd64/v3:-".*x86HasPOPCNT"
	// arm64:"VCNT","VUADDLV"
	return bits.OnesCount64(n)
}

// OnesCount32 checks codegen for bits.OnesCount32.
func OnesCount32(n uint32) int {
	// amd64/v2:-".*x86HasPOPCNT" amd64/v3:-".*x86HasPOPCNT"
	// arm64:"VCNT","VUADDLV"
	return bits.OnesCount32(n)
}

// OnesCount16 checks codegen for bits.OnesCount16.
func OnesCount16(n uint16) int {
	// amd64/v2:-".*x86HasPOPCNT" amd64/v3:-".*x86HasPOPCNT"
	// arm64:"VCNT","VUADDLV"
	return bits.OnesCount16(n)
}

// OnesCount8 checks codegen for bits.OnesCount8.
func OnesCount8(n uint8) int {
	return bits.OnesCount8(n)
}
// ----------------------- //
//    bits.ReverseBytes    //
// ----------------------- //
// ReverseBytes checks codegen for a byte-swap of a word-sized value.
func ReverseBytes(n uint) uint {
	return bits.ReverseBytes(n)
}

// ReverseBytes64 checks codegen for bits.ReverseBytes64.
func ReverseBytes64(n uint64) uint64 {
	return bits.ReverseBytes64(n)
}

// ReverseBytes32 checks codegen for bits.ReverseBytes32.
func ReverseBytes32(n uint32) uint32 {
	return bits.ReverseBytes32(n)
}

// ReverseBytes16 checks codegen for bits.ReverseBytes16.
func ReverseBytes16(n uint16) uint16 {
	// arm64:"REV16W",-"UBFX",-"ORR"
	// arm/5:"SLL","SRL","ORR"
	return bits.ReverseBytes16(n)
}
// --------------------- //
//    bits.RotateLeft    //
// --------------------- //
// RotateLeft64 checks codegen for a rotate by a constant amount.
func RotateLeft64(n uint64) uint64 {
	// s390x:"RISBGZ\t[$]0, [$]63, [$]37, "
	return bits.RotateLeft64(n, 37)
}

// RotateLeft32 checks codegen for bits.RotateLeft32 by a constant.
func RotateLeft32(n uint32) uint32 {
	// amd64:"ROLL" 386:"ROLL"
	// arm:`MOVW\tR[0-9]+@>23`
	return bits.RotateLeft32(n, 9)
}

// RotateLeft16 checks codegen for bits.RotateLeft16 by a constant.
func RotateLeft16(n uint16) uint16 {
	// amd64:"ROLW" 386:"ROLW"
	return bits.RotateLeft16(n, 5)
}

// RotateLeft8 checks codegen for bits.RotateLeft8 by a constant.
func RotateLeft8(n uint8) uint8 {
	// amd64:"ROLB" 386:"ROLB"
	return bits.RotateLeft8(n, 5)
}

// RotateLeftVariable checks codegen for a rotate by a variable amount.
func RotateLeftVariable(n uint, m int) uint {
	return bits.RotateLeft(n, m)
}

// RotateLeftVariable64 checks codegen for bits.RotateLeft64 by a variable.
func RotateLeftVariable64(n uint64, m int) uint64 {
	return bits.RotateLeft64(n, m)
}

// RotateLeftVariable32 checks codegen for bits.RotateLeft32 by a variable.
func RotateLeftVariable32(n uint32, m int) uint32 {
	// arm:`MOVW\tR[0-9]+@>R[0-9]+`
	return bits.RotateLeft32(n, m)
}
// ------------------------ //
//    bits.TrailingZeros    //
// ------------------------ //
// TrailingZeros checks that bits.TrailingZeros compiles to a
// count-trailing-zeros instruction sequence.
func TrailingZeros(n uint) int {
	// amd64/v1,amd64/v2:"BSFQ","MOVL\t\\$64","CMOVQEQ"
	// arm64:"RBIT","CLZ"
	// ppc64/power8:"ANDN","POPCNTD"
	// ppc64le/power8:"ANDN","POPCNTD"
	// ppc64/power9: "CNTTZD"
	// ppc64le/power9: "CNTTZD"
	return bits.TrailingZeros(n)
}

// TrailingZeros64 checks codegen for bits.TrailingZeros64.
func TrailingZeros64(n uint64) int {
	// amd64/v1,amd64/v2:"BSFQ","MOVL\t\\$64","CMOVQEQ"
	// arm64:"RBIT","CLZ"
	// ppc64/power8:"ANDN","POPCNTD"
	// ppc64le/power8:"ANDN","POPCNTD"
	// ppc64/power9: "CNTTZD"
	// ppc64le/power9: "CNTTZD"
	return bits.TrailingZeros64(n)
}

// TrailingZeros64Subtract checks codegen when the argument is itself
// a subtraction from a constant.
func TrailingZeros64Subtract(n uint64) int {
	// ppc64le/power8:"NEG","SUBC","ANDN","POPCNTD"
	// ppc64le/power9:"SUBC","CNTTZD"
	return bits.TrailingZeros64(1 - n)
}

// TrailingZeros32 checks codegen for bits.TrailingZeros32.
func TrailingZeros32(n uint32) int {
	// amd64/v1,amd64/v2:"BTSQ\\t\\$32","BSFQ"
	// arm64:"RBITW","CLZW"
	// s390x:"FLOGR","MOVWZ"
	// ppc64/power8:"ANDN","POPCNTW"
	// ppc64le/power8:"ANDN","POPCNTW"
	// ppc64/power9: "CNTTZW"
	// ppc64le/power9: "CNTTZW"
	return bits.TrailingZeros32(n)
}

// TrailingZeros16 checks codegen for bits.TrailingZeros16.
func TrailingZeros16(n uint16) int {
	// amd64:"BSFL","BTSL\\t\\$16"
	// arm:"ORR\t\\$65536","CLZ",-"MOVHU\tR"
	// arm64:"ORR\t\\$65536","RBITW","CLZW",-"MOVHU\tR",-"RBIT\t",-"CLZ\t"
	// s390x:"FLOGR","OR\t\\$65536"
	// ppc64/power8:"POPCNTD","OR\\t\\$65536"
	// ppc64le/power8:"POPCNTD","OR\\t\\$65536"
	// ppc64/power9:"CNTTZD","OR\\t\\$65536"
	// ppc64le/power9:"CNTTZD","OR\\t\\$65536"
	return bits.TrailingZeros16(n)
}

// TrailingZeros8 checks codegen for bits.TrailingZeros8.
func TrailingZeros8(n uint8) int {
	// amd64:"BSFL","BTSL\\t\\$8"
	// arm:"ORR\t\\$256","CLZ",-"MOVBU\tR"
	// arm64:"ORR\t\\$256","RBITW","CLZW",-"MOVBU\tR",-"RBIT\t",-"CLZ\t"
	// s390x:"FLOGR","OR\t\\$256"
	return bits.TrailingZeros8(n)
}
// IterateBitsNN checks special handling of TrailingZerosNN when the input is known to be non-zero.

// IterateBits sums the positions of the set bits of n by repeatedly
// clearing the lowest set bit; inside the loop n is provably non-zero,
// so no zero-input guard should be generated.
func IterateBits(n uint) int {
	i := 0
	for n != 0 {
		// amd64/v1,amd64/v2:"BSFQ",-"CMOVEQ"
		i += bits.TrailingZeros(n)
		n &= n - 1
	}
	return i
}

// IterateBits64 is the 64-bit variant of IterateBits.
func IterateBits64(n uint64) int {
	i := 0
	for n != 0 {
		// amd64/v1,amd64/v2:"BSFQ",-"CMOVEQ"
		i += bits.TrailingZeros64(n)
		n &= n - 1
	}
	return i
}

// IterateBits32 is the 32-bit variant of IterateBits.
func IterateBits32(n uint32) int {
	i := 0
	for n != 0 {
		// amd64/v1,amd64/v2:"BSFL",-"BTSQ"
		i += bits.TrailingZeros32(n)
		n &= n - 1
	}
	return i
}

// IterateBits16 is the 16-bit variant of IterateBits.
func IterateBits16(n uint16) int {
	i := 0
	for n != 0 {
		// amd64/v1,amd64/v2:"BSFL",-"BTSL"
		// arm64:"RBITW","CLZW",-"ORR"
		i += bits.TrailingZeros16(n)
		n &= n - 1
	}
	return i
}

// IterateBits8 is the 8-bit variant of IterateBits.
func IterateBits8(n uint8) int {
	i := 0
	for n != 0 {
		// amd64/v1,amd64/v2:"BSFL",-"BTSL"
		// arm64:"RBITW","CLZW",-"ORR"
		i += bits.TrailingZeros8(n)
		n &= n - 1
	}
	return i
}
// --------------- //
//    bits.Add*    //
// --------------- //
// Add checks codegen for a full-word add-with-carry.
func Add(x, y, ci uint) (r, co uint) {
	// arm64:"ADDS","ADCS","ADC",-"ADD\t",-"CMP"
	// amd64:"NEGL","ADCQ","SBBQ","NEGQ"
	// ppc64: "ADDC", "ADDE", "ADDZE"
	// ppc64le: "ADDC", "ADDE", "ADDZE"
	// s390x:"ADDE","ADDC\t[$]-1,"
	return bits.Add(x, y, ci)
}

// AddC checks add-with-carry with a constant addend.
func AddC(x, ci uint) (r, co uint) {
	// arm64:"ADDS","ADCS","ADC",-"ADD\t",-"CMP"
	// amd64:"NEGL","ADCQ","SBBQ","NEGQ"
	// ppc64: "ADDC", "ADDE", "ADDZE"
	// ppc64le: "ADDC", "ADDE", "ADDZE"
	// s390x:"ADDE","ADDC\t[$]-1,"
	return bits.Add(x, 7, ci)
}

// AddZ checks add-with-carry with a constant-zero carry-in.
func AddZ(x, y uint) (r, co uint) {
	// arm64:"ADDS","ADC",-"ADCS",-"ADD\t",-"CMP"
	// amd64:"ADDQ","SBBQ","NEGQ",-"NEGL",-"ADCQ"
	// ppc64: "ADDC", -"ADDE", "ADDZE"
	// ppc64le: "ADDC", -"ADDE", "ADDZE"
	// s390x:"ADDC",-"ADDC\t[$]-1,"
	return bits.Add(x, y, 0)
}

// AddR checks add-with-carry when the carry-out is discarded.
func AddR(x, y, ci uint) uint {
	// arm64:"ADDS","ADCS",-"ADD\t",-"CMP"
	// amd64:"NEGL","ADCQ",-"SBBQ",-"NEGQ"
	// ppc64: "ADDC", "ADDE", -"ADDZE"
	// ppc64le: "ADDC", "ADDE", -"ADDZE"
	// s390x:"ADDE","ADDC\t[$]-1,"
	r, _ := bits.Add(x, y, ci)
	return r
}

// AddM checks a multi-word add where the carry is chained through.
func AddM(p, q, r *[3]uint) {
	var c uint
	r[0], c = bits.Add(p[0], q[0], c)
	// arm64:"ADCS",-"ADD\t",-"CMP"
	// amd64:"ADCQ",-"NEGL",-"SBBQ",-"NEGQ"
	// s390x:"ADDE",-"ADDC\t[$]-1,"
	r[1], c = bits.Add(p[1], q[1], c)
	r[2], c = bits.Add(p[2], q[2], c)
}
// Add64 checks codegen for a 64-bit add-with-carry.
func Add64(x, y, ci uint64) (r, co uint64) {
	// arm64:"ADDS","ADCS","ADC",-"ADD\t",-"CMP"
	// amd64:"NEGL","ADCQ","SBBQ","NEGQ"
	// ppc64: "ADDC", "ADDE", "ADDZE"
	// ppc64le: "ADDC", "ADDE", "ADDZE"
	// s390x:"ADDE","ADDC\t[$]-1,"
	return bits.Add64(x, y, ci)
}

// Add64C checks 64-bit add-with-carry with a constant addend.
func Add64C(x, ci uint64) (r, co uint64) {
	// arm64:"ADDS","ADCS","ADC",-"ADD\t",-"CMP"
	// amd64:"NEGL","ADCQ","SBBQ","NEGQ"
	// ppc64: "ADDC", "ADDE", "ADDZE"
	// ppc64le: "ADDC", "ADDE", "ADDZE"
	// s390x:"ADDE","ADDC\t[$]-1,"
	return bits.Add64(x, 7, ci)
}

// Add64Z checks 64-bit add-with-carry with a constant-zero carry-in.
func Add64Z(x, y uint64) (r, co uint64) {
	// arm64:"ADDS","ADC",-"ADCS",-"ADD\t",-"CMP"
	// amd64:"ADDQ","SBBQ","NEGQ",-"NEGL",-"ADCQ"
	// ppc64: "ADDC", -"ADDE", "ADDZE"
	// ppc64le: "ADDC", -"ADDE", "ADDZE"
	// s390x:"ADDC",-"ADDC\t[$]-1,"
	return bits.Add64(x, y, 0)
}

// Add64R checks 64-bit add-with-carry when the carry-out is discarded.
func Add64R(x, y, ci uint64) uint64 {
	// arm64:"ADDS","ADCS",-"ADD\t",-"CMP"
	// amd64:"NEGL","ADCQ",-"SBBQ",-"NEGQ"
	// ppc64: "ADDC", "ADDE", -"ADDZE"
	// ppc64le: "ADDC", "ADDE", -"ADDZE"
	// s390x:"ADDE","ADDC\t[$]-1,"
	r, _ := bits.Add64(x, y, ci)
	return r
}

// Add64M checks a multi-word 64-bit add with a chained carry.
func Add64M(p, q, r *[3]uint64) {
	var c uint64
	r[0], c = bits.Add64(p[0], q[0], c)
	// arm64:"ADCS",-"ADD\t",-"CMP"
	// amd64:"ADCQ",-"NEGL",-"SBBQ",-"NEGQ"
	// ppc64: -"ADDC", "ADDE", -"ADDZE"
	// ppc64le: -"ADDC", "ADDE", -"ADDZE"
	// s390x:"ADDE",-"ADDC\t[$]-1,"
	r[1], c = bits.Add64(p[1], q[1], c)
	r[2], c = bits.Add64(p[2], q[2], c)
}

// Add64MSaveC checks a chained add where each carry-out is stored.
func Add64MSaveC(p, q, r, c *[2]uint64) {
	// ppc64: "ADDC\tR", "ADDZE"
	// ppc64le: "ADDC\tR", "ADDZE"
	r[0], c[0] = bits.Add64(p[0], q[0], 0)
	// ppc64: "ADDC\t[$]-1", "ADDE", "ADDZE"
	// ppc64le: "ADDC\t[$]-1", "ADDE", "ADDZE"
	r[1], c[1] = bits.Add64(p[1], q[1], c[0])
}
// Add64PanicOnOverflowEQ checks the branch generated when the carry-out
// is compared with == before panicking.
func Add64PanicOnOverflowEQ(a, b uint64) uint64 {
	r, c := bits.Add64(a, b, 0)
	// s390x:"BRC\t[$]3,",-"ADDE"
	if c == 1 {
		panic("overflow")
	}
	return r
}

// Add64PanicOnOverflowNE checks the carry-out compared with !=.
func Add64PanicOnOverflowNE(a, b uint64) uint64 {
	r, c := bits.Add64(a, b, 0)
	// s390x:"BRC\t[$]3,",-"ADDE"
	if c != 0 {
		panic("overflow")
	}
	return r
}

// Add64PanicOnOverflowGT checks the carry-out compared with >.
func Add64PanicOnOverflowGT(a, b uint64) uint64 {
	r, c := bits.Add64(a, b, 0)
	// s390x:"BRC\t[$]3,",-"ADDE"
	if c > 0 {
		panic("overflow")
	}
	return r
}

// Add64MPanicOnOverflowEQ is the multi-word variant with an == check.
func Add64MPanicOnOverflowEQ(a, b [2]uint64) [2]uint64 {
	var r [2]uint64
	var c uint64
	r[0], c = bits.Add64(a[0], b[0], c)
	r[1], c = bits.Add64(a[1], b[1], c)
	// s390x:"BRC\t[$]3,"
	if c == 1 {
		panic("overflow")
	}
	return r
}

// Add64MPanicOnOverflowNE is the multi-word variant with a != check.
func Add64MPanicOnOverflowNE(a, b [2]uint64) [2]uint64 {
	var r [2]uint64
	var c uint64
	r[0], c = bits.Add64(a[0], b[0], c)
	r[1], c = bits.Add64(a[1], b[1], c)
	// s390x:"BRC\t[$]3,"
	if c != 0 {
		panic("overflow")
	}
	return r
}

// Add64MPanicOnOverflowGT is the multi-word variant with a > check.
func Add64MPanicOnOverflowGT(a, b [2]uint64) [2]uint64 {
	var r [2]uint64
	var c uint64
	r[0], c = bits.Add64(a[0], b[0], c)
	r[1], c = bits.Add64(a[1], b[1], c)
	// s390x:"BRC\t[$]3,"
	if c > 0 {
		panic("overflow")
	}
	return r
}
// --------------- //
//    bits.Sub*    //
// --------------- //
// Sub checks codegen for a full-word subtract-with-borrow.
func Sub(x, y, ci uint) (r, co uint) {
	// amd64:"NEGL","SBBQ","NEGQ"
	// arm64:"NEGS","SBCS","NGC","NEG",-"ADD",-"SUB",-"CMP"
	// ppc64:"SUBC", "SUBE", "SUBZE", "NEG"
	// ppc64le:"SUBC", "SUBE", "SUBZE", "NEG"
	return bits.Sub(x, y, ci)
}

// SubC checks subtract-with-borrow with a constant subtrahend.
func SubC(x, ci uint) (r, co uint) {
	// amd64:"NEGL","SBBQ","NEGQ"
	// arm64:"NEGS","SBCS","NGC","NEG",-"ADD",-"SUB",-"CMP"
	// ppc64:"SUBC", "SUBE", "SUBZE", "NEG"
	// ppc64le:"SUBC", "SUBE", "SUBZE", "NEG"
	return bits.Sub(x, 7, ci)
}

// SubZ checks subtract-with-borrow with a constant-zero borrow-in.
func SubZ(x, y uint) (r, co uint) {
	// amd64:"SUBQ","SBBQ","NEGQ",-"NEGL"
	// arm64:"SUBS","NGC","NEG",-"SBCS",-"ADD",-"SUB\t",-"CMP"
	// ppc64:"SUBC", -"SUBE", "SUBZE", "NEG"
	// ppc64le:"SUBC", -"SUBE", "SUBZE", "NEG"
	return bits.Sub(x, y, 0)
}

// SubR checks subtract-with-borrow when the borrow-out is discarded.
func SubR(x, y, ci uint) uint {
	// amd64:"NEGL","SBBQ",-"NEGQ"
	// arm64:"NEGS","SBCS",-"NGC",-"NEG\t",-"ADD",-"SUB",-"CMP"
	// ppc64:"SUBC", "SUBE", -"SUBZE", -"NEG"
	// ppc64le:"SUBC", "SUBE", -"SUBZE", -"NEG"
	r, _ := bits.Sub(x, y, ci)
	return r
}

// SubM checks a multi-word subtract with a chained borrow.
func SubM(p, q, r *[3]uint) {
	var c uint
	r[0], c = bits.Sub(p[0], q[0], c)
	// amd64:"SBBQ",-"NEGL",-"NEGQ"
	// arm64:"SBCS",-"NEGS",-"NGC",-"NEG",-"ADD",-"SUB",-"CMP"
	// ppc64:-"SUBC", "SUBE", -"SUBZE", -"NEG"
	// ppc64le:-"SUBC", "SUBE", -"SUBZE", -"NEG"
	r[1], c = bits.Sub(p[1], q[1], c)
	r[2], c = bits.Sub(p[2], q[2], c)
}
// Sub64 checks codegen for a 64-bit subtract-with-borrow.
func Sub64(x, y, ci uint64) (r, co uint64) {
	// amd64:"NEGL","SBBQ","NEGQ"
	// arm64:"NEGS","SBCS","NGC","NEG",-"ADD",-"SUB",-"CMP"
	// ppc64:"SUBC", "SUBE", "SUBZE", "NEG"
	// ppc64le:"SUBC", "SUBE", "SUBZE", "NEG"
	return bits.Sub64(x, y, ci)
}

// Sub64C checks 64-bit subtract-with-borrow with a constant subtrahend.
func Sub64C(x, ci uint64) (r, co uint64) {
	// amd64:"NEGL","SBBQ","NEGQ"
	// arm64:"NEGS","SBCS","NGC","NEG",-"ADD",-"SUB",-"CMP"
	// ppc64:"SUBC", "SUBE", "SUBZE", "NEG"
	// ppc64le:"SUBC", "SUBE", "SUBZE", "NEG"
	return bits.Sub64(x, 7, ci)
}

// Sub64Z checks 64-bit subtract-with-borrow with a zero borrow-in.
func Sub64Z(x, y uint64) (r, co uint64) {
	// amd64:"SUBQ","SBBQ","NEGQ",-"NEGL"
	// arm64:"SUBS","NGC","NEG",-"SBCS",-"ADD",-"SUB\t",-"CMP"
	// ppc64:"SUBC", -"SUBE", "SUBZE", "NEG"
	// ppc64le:"SUBC", -"SUBE", "SUBZE", "NEG"
	return bits.Sub64(x, y, 0)
}

// Sub64R checks 64-bit subtract-with-borrow, borrow-out discarded.
func Sub64R(x, y, ci uint64) uint64 {
	// amd64:"NEGL","SBBQ",-"NEGQ"
	// arm64:"NEGS","SBCS",-"NGC",-"NEG\t",-"ADD",-"SUB",-"CMP"
	// ppc64:"SUBC", "SUBE", -"SUBZE", -"NEG"
	// ppc64le:"SUBC", "SUBE", -"SUBZE", -"NEG"
	r, _ := bits.Sub64(x, y, ci)
	return r
}

// Sub64M checks a multi-word 64-bit subtract with a chained borrow.
func Sub64M(p, q, r *[3]uint64) {
	var c uint64
	r[0], c = bits.Sub64(p[0], q[0], c)
	// amd64:"SBBQ",-"NEGL",-"NEGQ"
	// arm64:"SBCS",-"NEGS",-"NGC",-"NEG",-"ADD",-"SUB",-"CMP"
	r[1], c = bits.Sub64(p[1], q[1], c)
	r[2], c = bits.Sub64(p[2], q[2], c)
}

// Sub64MSaveC checks a chained subtract where each borrow-out is stored.
func Sub64MSaveC(p, q, r, c *[2]uint64) {
	// ppc64:"SUBC\tR\\d+, R\\d+,", "SUBZE", "NEG"
	// ppc64le:"SUBC\tR\\d+, R\\d+,", "SUBZE", "NEG"
	r[0], c[0] = bits.Sub64(p[0], q[0], 0)
	// ppc64:"SUBC\tR\\d+, [$]0,", "SUBE", "SUBZE", "NEG"
	// ppc64le:"SUBC\tR\\d+, [$]0,", "SUBE", "SUBZE", "NEG"
	r[1], c[1] = bits.Sub64(p[1], q[1], c[0])
}
// Sub64PanicOnOverflowEQ checks the branch generated when the borrow-out
// is compared with == before panicking.
func Sub64PanicOnOverflowEQ(a, b uint64) uint64 {
	r, b := bits.Sub64(a, b, 0)
	// s390x:"BRC\t[$]12,",-"ADDE",-"SUBE"
	if b == 1 {
		panic("overflow")
	}
	return r
}

// Sub64PanicOnOverflowNE checks the borrow-out compared with !=.
func Sub64PanicOnOverflowNE(a, b uint64) uint64 {
	r, b := bits.Sub64(a, b, 0)
	// s390x:"BRC\t[$]12,",-"ADDE",-"SUBE"
	if b != 0 {
		panic("overflow")
	}
	return r
}

// Sub64PanicOnOverflowGT checks the borrow-out compared with >.
func Sub64PanicOnOverflowGT(a, b uint64) uint64 {
	r, b := bits.Sub64(a, b, 0)
	// s390x:"BRC\t[$]12,",-"ADDE",-"SUBE"
	if b > 0 {
		panic("overflow")
	}
	return r
}

// Sub64MPanicOnOverflowEQ is the multi-word variant with an == check.
func Sub64MPanicOnOverflowEQ(a, b [2]uint64) [2]uint64 {
	var r [2]uint64
	var c uint64
	r[0], c = bits.Sub64(a[0], b[0], c)
	r[1], c = bits.Sub64(a[1], b[1], c)
	// s390x:"BRC\t[$]12,"
	if c == 1 {
		panic("overflow")
	}
	return r
}

// Sub64MPanicOnOverflowNE is the multi-word variant with a != check.
func Sub64MPanicOnOverflowNE(a, b [2]uint64) [2]uint64 {
	var r [2]uint64
	var c uint64
	r[0], c = bits.Sub64(a[0], b[0], c)
	r[1], c = bits.Sub64(a[1], b[1], c)
	// s390x:"BRC\t[$]12,"
	if c != 0 {
		panic("overflow")
	}
	return r
}

// Sub64MPanicOnOverflowGT is the multi-word variant with a > check.
func Sub64MPanicOnOverflowGT(a, b [2]uint64) [2]uint64 {
	var r [2]uint64
	var c uint64
	r[0], c = bits.Sub64(a[0], b[0], c)
	r[1], c = bits.Sub64(a[1], b[1], c)
	// s390x:"BRC\t[$]12,"
	if c > 0 {
		panic("overflow")
	}
	return r
}
// --------------- //
//    bits.Mul*    //
// --------------- //
// Mul checks codegen for a full-width (double-word result) multiply.
func Mul(x, y uint) (hi, lo uint) {
	// arm64:"UMULH","MUL"
	// ppc64:"MULHDU","MULLD"
	// ppc64le:"MULHDU","MULLD"
	return bits.Mul(x, y)
}

// Mul64 checks codegen for bits.Mul64.
func Mul64(x, y uint64) (hi, lo uint64) {
	// arm64:"UMULH","MUL"
	// ppc64:"MULHDU","MULLD"
	// ppc64le:"MULHDU","MULLD"
	// riscv64:"MULHU","MUL"
	return bits.Mul64(x, y)
}
// --------------- //
//    bits.Div*    //
// --------------- //
// Div checks codegen for a double-word-by-single-word division.
func Div(hi, lo, x uint) (q, r uint) {
	return bits.Div(hi, lo, x)
}

// Div32 checks that bits.Div32 compiles to a divide plus a
// multiply-subtract for the remainder, with no separate remainder op.
func Div32(hi, lo, x uint32) (q, r uint32) {
	// arm64:"ORR","UDIV","MSUB",-"UREM"
	return bits.Div32(hi, lo, x)
}

// Div64 checks codegen for bits.Div64.
func Div64(hi, lo, x uint64) (q, r uint64) {
	return bits.Div64(hi, lo, x)
}

// Div64degenerate checks codegen when the high word is constant zero.
func Div64degenerate(x uint64) (q, r uint64) {
	return bits.Div64(0, x, 5)
}