$ nncp-ack [options] -all
$ nncp-ack [options] -node NODE[,@dots{}]
$ nncp-ack [options] -node NODE -pkt PKT
+
+$ nncp-ack [@dots{}] 4>&1 >&2 | nncp-rm [@dots{}] -pkt
@end example
Send acknowledgement of successful receipt of the specified @option{PKT}
(Base32-encoded hash), or acknowledge all packets in the node's @code{rx}
outbound spool. If @option{-all} is specified, then do that for all nodes.
+That command outputs the list of created encrypted ACK packets
+(@code{NODE/PKT}) to the @strong{4}th file descriptor. That output can be
+passed, for example, to @command{@ref{nncp-rm}} to remove them after
+transmission, to avoid waiting for acknowledgement and retransmission.
+
The general workflow with acknowledgements is as follows, assuming that
Alice has some outbound packets for Bob:
That will also check that the copied packets' checksums are not mismatched.
-@item Create ACK packets of received ones:
+@item Create ACK packets of received ones, saving the list of encrypted
+ ACK packets:
@example
-bob$ nncp-ack -node alice
+bob$ nncp-ack -node alice 4>acks
@end example
@item Send those newly created packets back to Alice:
@example
-bob$ nncp-xfer -tx /mnt/shared
+bob$ nncp-xfer [-keep] -tx /mnt/shared
+@end example
+
+@item Remove them from the outbound spool, because we expect no
+  acknowledgement for them:
+
+@example
+bob$ nncp-rm -node alice -pkt <acks
@end example
@item Get those acknowledgement packets and @ref{nncp-toss, toss} them:
src io.Reader,
pktName string,
areaId *AreaId,
-) (*Node, int64, error) {
+) (*Node, int64, string, error) {
var area *Area
if areaId != nil {
area = ctx.AreaId2Area[*areaId]
if area.Prv == nil {
- return nil, 0, errors.New("area has no encryption keys")
+ return nil, 0, "", errors.New("area has no encryption keys")
}
}
hops := make([]*Node, 0, 1+len(node.Via))
expectedSize += sizePadCalc(expectedSize, minSize, wrappers)
expectedSize = PktEncOverhead + sizeWithTags(expectedSize)
if maxSize != 0 && expectedSize > maxSize {
- return nil, 0, TooBig
+ return nil, 0, "", TooBig
}
if !ctx.IsEnoughSpace(expectedSize) {
- return nil, 0, errors.New("is not enough space")
+ return nil, 0, "", errors.New("is not enough space")
}
}
tmp, err := ctx.NewTmpFileWHash()
if err != nil {
- return nil, 0, err
+ return nil, 0, "", err
}
results := make(chan PktEncWriteResult)
r := <-results
if r.err != nil {
tmp.Fd.Close()
- return nil, 0, r.err
+ return nil, 0, "", r.err
}
if r.pktEncRaw != nil {
pktEncRaw = r.pktEncRaw
err = tmp.Commit(filepath.Join(nodePath, string(TTx)))
os.Symlink(nodePath, filepath.Join(ctx.Spool, lastNode.Name))
if err != nil {
- return lastNode, 0, err
+ return lastNode, 0, "", err
}
if ctx.HdrUsage {
ctx.HdrWrite(pktEncRaw, filepath.Join(nodePath, string(TTx), tmp.Checksum()))
}
if err = ensureDir(seenDir); err != nil {
ctx.LogE("tx-mkdir", les, err, logMsg)
- return lastNode, 0, err
+ return lastNode, 0, "", err
}
if fd, err := os.Create(seenPath); err == nil {
fd.Close()
if err = DirSync(seenDir); err != nil {
ctx.LogE("tx-dirsync", les, err, logMsg)
- return lastNode, 0, err
+ return lastNode, 0, "", err
}
}
ctx.LogI("tx-area", les, logMsg)
}
- return lastNode, payloadSize, err
+ return lastNode, payloadSize, tmp.Checksum(), err
}
type DummyCloser struct{}
if err != nil {
return err
}
- _, finalSize, err := ctx.Tx(
+ _, finalSize, pktName, err := ctx.Tx(
node, pkt, nice,
srcSize, minSize, maxSize,
bufio.NewReader(reader), dstPath, areaId,
{"Src", srcPath},
{"Dst", dstPath},
{"Size", finalSize},
+ {"Pkt", pktName},
}
logMsg := func(les LEs) string {
return fmt.Sprintf(
return err
}
hsh := MTHNew(0, 0)
- _, size, err := ctx.Tx(
+ _, size, pktName, err := ctx.Tx(
node, pkt, nice,
0, minSize, maxSize,
io.TeeReader(lr, hsh),
{"Src", srcPath},
{"Dst", path},
{"Size", size},
+ {"Pkt", pktName},
}
logMsg := func(les LEs) string {
return fmt.Sprintf(
return err
}
metaPktSize := int64(buf.Len())
- _, _, err = ctx.Tx(
+ _, _, pktName, err := ctx.Tx(
node,
pkt,
nice,
{"Src", srcPath},
{"Dst", path},
{"Size", metaPktSize},
+ {"Pkt", pktName},
}
logMsg := func(les LEs) string {
return fmt.Sprintf(
}
src := strings.NewReader(dstPath)
size := int64(src.Len())
- _, _, err = ctx.Tx(node, pkt, nice, size, minSize, MaxFileSize, src, srcPath, nil)
+ _, _, pktName, err := ctx.Tx(
+ node, pkt, nice, size, minSize, MaxFileSize, src, srcPath, nil,
+ )
les := LEs{
{"Type", "freq"},
{"Node", node.Id},
{"ReplyNice", int(replyNice)},
{"Src", srcPath},
{"Dst", dstPath},
+ {"Pkt", pktName},
}
logMsg := func(les LEs) string {
return fmt.Sprintf(
}(in)
in = pr
}
- _, size, err := ctx.Tx(node, pkt, nice, 0, minSize, maxSize, in, handle, areaId)
+ _, size, pktName, err := ctx.Tx(
+ node, pkt, nice, 0, minSize, maxSize, in, handle, areaId,
+ )
if !noCompress {
e := <-compressErr
if err == nil {
{"ReplyNice", int(replyNice)},
{"Dst", dst},
{"Size", size},
+ {"Pkt", pktName},
}
logMsg := func(les LEs) string {
return fmt.Sprintf(
nice uint8,
hsh string,
minSize int64,
-) error {
+) (pktName string, err error) {
hshRaw, err := Base32Codec.DecodeString(hsh)
if err != nil {
- return err
+ return "", err
}
if len(hshRaw) != MTHSize {
- return errors.New("Invalid packet id size")
+ return "", errors.New("Invalid packet id size")
}
pkt, err := NewPkt(PktTypeACK, nice, []byte(hshRaw))
if err != nil {
- return err
+ return "", err
}
src := bytes.NewReader([]byte{})
- _, _, err = ctx.Tx(node, pkt, nice, 0, minSize, MaxFileSize, src, hsh, nil)
+ _, _, pktName, err = ctx.Tx(
+ node, pkt, nice, 0, minSize, MaxFileSize, src, hsh, nil,
+ )
les := LEs{
{"Type", "ack"},
{"Node", node.Id},
{"Nice", int(nice)},
{"Pkt", hsh},
+ {"NewPkt", pktName},
}
logMsg := func(les LEs) string {
return fmt.Sprintf("ACK to %s of %s is sent", ctx.NodeName(node.Id), hsh)
} else {
ctx.LogE("tx", les, err, logMsg)
}
- return err
+ return
}