bufio.NewReader's default buffer size is 4 KiB, but internally NNCP mostly operates on 128 KiB (MTHBlockSize) blocks, so use bufio.NewReaderSize explicitly.
continue
}
pipeR, pipeW := io.Pipe()
- go nncp.PktEncRead(ctx.Self, ctx.Neigh, bufio.NewReader(fd), pipeW, true, nil)
+ go nncp.PktEncRead(
+ ctx.Self,
+ ctx.Neigh,
+ bufio.NewReaderSize(fd, nncp.MTHBlockSize),
+ pipeW, true, nil,
+ )
var pkt nncp.Pkt
_, err = xdr.Unmarshal(pipeR, &pkt)
fd.Close()
"go.cypherpunks.ru/nncp/v8"
)
-const (
- CopyBufSize = 1 << 17
-)
-
func usage() {
fmt.Fprintf(os.Stderr, nncp.UsageHeader())
fmt.Fprintf(os.Stderr, "nncp-bundle -- Create/digest stream of NNCP encrypted packets\n\n")
log.Fatalln("Error writing tar header:", err)
}
if _, err = nncp.CopyProgressed(
- tarWr, bufio.NewReader(fd), "Tx",
+ tarWr, bufio.NewReaderSize(fd, nncp.MTHBlockSize), "Tx",
append(les, nncp.LEs{
{K: "Pkt", V: nncp.Base32Codec.EncodeToString(job.HshValue[:])},
{K: "FullSize", V: job.Size},
log.Fatalln("Error during tar closing:", err)
}
} else {
- bufStdin := bufio.NewReaderSize(os.Stdin, CopyBufSize*2)
+ bufStdin := bufio.NewReaderSize(os.Stdin, nncp.MTHBlockSize*2)
pktEncBuf := make([]byte, nncp.PktEncOverhead)
var pktEnc *nncp.PktEnc
for {
- peeked, err := bufStdin.Peek(CopyBufSize)
+ peeked, err := bufStdin.Peek(nncp.MTHBlockSize)
if err != nil && err != io.EOF {
log.Fatalln("Error during reading:", err)
}
if err != nil {
log.Fatalln("Error during temporary file creation:", err)
}
- bufTmp := bufio.NewWriterSize(tmp, CopyBufSize)
+ bufTmp := bufio.NewWriterSize(tmp, nncp.MTHBlockSize)
if _, err = bufTmp.Write(pktEncBuf); err != nil {
log.Fatalln("Error during writing:", err)
}
replyNice,
flag.Args()[1],
flag.Args()[2:],
- bufio.NewReader(os.Stdin),
+ bufio.NewReaderSize(os.Stdin, nncp.MTHBlockSize),
int64(*minSize)*1024,
maxSize,
*noCompress,
}
hsh = nncp.MTHNew(fi.Size(), 0)
if _, err = nncp.CopyProgressed(
- hsh, bufio.NewReader(fd), "check",
+ hsh, bufio.NewReaderSize(fd, nncp.MTHBlockSize), "check",
nncp.LEs{{K: "Pkt", V: chunkPath}, {K: "FullSize", V: fi.Size()}},
ctx.ShowPrgrs,
); err != nil {
log.Fatalln("Can not stat file:", err)
}
if _, err = nncp.CopyProgressed(
- dstW, bufio.NewReader(fd), "reass",
+ dstW, bufio.NewReaderSize(fd, nncp.MTHBlockSize), "reass",
nncp.LEs{{K: "Pkt", V: chunkPath}, {K: "FullSize", V: fi.Size()}},
ctx.ShowPrgrs,
); err != nil {
}
r, w := io.Pipe()
go func() {
- _, err := io.CopyN(w, bufio.NewReader(fd), fiInt.Size())
+ _, err := io.CopyN(
+ w, bufio.NewReaderSize(fd, nncp.MTHBlockSize), fiInt.Size(),
+ )
if err == nil {
err = w.Close()
}
}
bufW := bufio.NewWriter(tmp)
copied, err := nncp.CopyProgressed(
- bufW, bufio.NewReader(fd), "Tx",
+ bufW, bufio.NewReaderSize(fd, nncp.MTHBlockSize), "Tx",
append(les, nncp.LE{K: "FullSize", V: job.Size}),
ctx.ShowPrgrs,
)
sharedKey, _, _, err = PktEncRead(
ctx.Self,
ctx.Neigh,
- bufio.NewReader(fd),
+ bufio.NewReaderSize(fd, MTHBlockSize),
pipeWB,
sharedKey == nil,
sharedKey,
fd.Close()
return w.CloseWithError(err)
}
- if _, err = io.Copy(tarWr, bufio.NewReader(fd)); err != nil {
+ if _, err = io.Copy(
+ tarWr, bufio.NewReaderSize(fd, MTHBlockSize),
+ ); err != nil {
fd.Close()
return w.CloseWithError(err)
}
_, finalSize, pktName, err := ctx.Tx(
node, pkt, nice,
srcSize, minSize, maxSize,
- bufio.NewReader(reader), dstPath, areaId,
+ bufio.NewReaderSize(reader, MTHBlockSize), dstPath, areaId,
)
les := LEs{
{"Type", "file"},
return err
}
- br := bufio.NewReader(reader)
+ br := bufio.NewReaderSize(reader, MTHBlockSize)
var sizeFull int64
var chunkNum int
checksums := [][MTHSize]byte{}