Cypherpunks.ru repositories - nncp.git/commitdiff
Merge branch 'develop' v5.2.0
author: Sergey Matveev <stargrave@stargrave.org>
Sat, 14 Dec 2019 18:44:32 +0000 (21:44 +0300)
committer: Sergey Matveev <stargrave@stargrave.org>
Sat, 14 Dec 2019 18:44:32 +0000 (21:44 +0300)
52 files changed:
VERSION
doc/building.texi
doc/bundles.texi
doc/cfg.texi
doc/cmds.texi
doc/download.texi
doc/index.texi
doc/integration.texi
doc/integrity.texi
doc/news.ru.texi
doc/news.texi
doc/sources.texi
doc/sp.texi
doc/spool.texi
doc/usecases.ru.texi
doc/usecases.texi
ports/nncp/Makefile
src/call.go
src/cfg.go
src/check.go
src/cmd/nncp-bundle/main.go
src/cmd/nncp-call/main.go
src/cmd/nncp-caller/main.go
src/cmd/nncp-cfgmin/main.go
src/cmd/nncp-cfgnew/main.go
src/cmd/nncp-check/main.go
src/cmd/nncp-daemon/main.go
src/cmd/nncp-exec/main.go
src/cmd/nncp-file/main.go
src/cmd/nncp-freq/main.go
src/cmd/nncp-log/main.go
src/cmd/nncp-pkt/main.go
src/cmd/nncp-reass/main.go
src/cmd/nncp-rm/main.go
src/cmd/nncp-stat/main.go
src/cmd/nncp-toss/main.go
src/cmd/nncp-xfer/main.go
src/ctx.go
src/humanizer.go
src/jobs.go
src/lockdir.go
src/log.go
src/progress.go [new file with mode: 0644]
src/sp.go
src/toss.go
src/tx.go
src/tx_test.go
src/uilive/LICENSE [new file with mode: 0644]
src/uilive/README.md [new file with mode: 0644]
src/uilive/doc.go [new file with mode: 0644]
src/uilive/terminal_size.go [new file with mode: 0644]
src/uilive/writer.go [new file with mode: 0644]

diff --git a/VERSION b/VERSION
index 61fcc87350341bc51d640aa9161073ce615703ce..91ff57278e37ef9cecfeaea47f0d77966799af28 100644 (file)
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-5.1.2
+5.2.0
index 4f30e3e7dfee2f652cb8009de2fd1a6377517f38..cb4d4393e1f88f0c13a95248c6d05ad50e3b3d7d 100644 (file)
@@ -10,17 +10,17 @@ Make sure that Go is installed. For example to install it from packages:
     @verb{|apt install golang|}
 @end table
 
-@verbatim
-$ [fetch|wget] http://www.nncpgo.org/download/nncp-5.1.2.tar.xz
-$ [fetch|wget] http://www.nncpgo.org/download/nncp-5.1.2.tar.xz.sig
-$ gpg --verify nncp-5.1.2.tar.xz.sig nncp-5.1.2.tar.xz
-$ xz --decompress --stdout nncp-5.1.2.tar.xz | tar xf -
-$ make -C nncp-5.1.2 all
-@end verbatim
+@example
+$ [fetch|wget] http://www.nncpgo.org/download/nncp-@value{VERSION}.tar.xz
+$ [fetch|wget] http://www.nncpgo.org/download/nncp-@value{VERSION}.tar.xz.sig
+$ gpg --verify nncp-@value{VERSION}.tar.xz.sig nncp-@value{VERSION}.tar.xz
+$ xz --decompress --stdout nncp-@value{VERSION}.tar.xz | tar xf -
+$ make -C nncp-@value{VERSION} all
+@end example
 
 There is @command{install} make-target respecting @env{DESTDIR}. It will
 install binaries and info-documentation:
 
-@verbatim
-# make -C nncp-5.1.2 install PREFIX=/usr/local
-@end verbatim
+@example
+# make -C nncp-@value{VERSION} install PREFIX=/usr/local
+@end example
index 21b31046a1ae1c7d1d0aec48f6f5f76757b86c4c..7bbc85a274d7480156383c87f18c5546475d9f07 100644 (file)
@@ -17,23 +17,23 @@ sequentially streamed for recording and digested back.
 
 @item They do not require intermediate storage before recording on
 either CD-ROM or tape drive.
-@verbatim
+@example
 $ nncp-bundle -tx SOMENODE | cdrecord -tao -         # record directly to CD
 $ nncp-bundle -tx SOMENODE | dd of=/dev/sa0 bs=10240 # record directly to tape
 
 $ dd if=/dev/cd0 bs=2048 | nncp-bundle -rx  # read directly from CD
 $ dd if=/dev/sa0 bs=10240 | nncp-bundle -rx # read directly from tape
-@end verbatim
+@end example
 
 @item They do not require filesystem existence to deal with, simplifying
 administration when operating in heterogeneous systems with varying
 filesystems. No @command{mount}/@command{umount}, @command{zpool
 import}/@command{zpool export} and struggling with file permissions.
-@verbatim
+@example
 $ nncp-bundle -tx SOMENODE | dd of=/dev/da0 bs=1M # record directly to
                                                   # hard/flash drive
 $ dd if=/dev/da0 bs=1M | nncp-bundle -rx # read directly from drive
-@end verbatim
+@end example
 
 @item This is the fastest way to record outbound packets for offline
 transmission -- sequential write is always faster, when no
index 46fa5f30ca50aefe1d8b09efccd167f352986f33..1acfc33377dc06d6f257dd0b30785a0a65957f44 100644 (file)
@@ -8,6 +8,7 @@ Example @url{https://hjson.org/, Hjson} configuration file:
   spool: /var/spool/nncp
   log: /var/spool/nncp/log
   umask: "022"
+  noprogress: true
 
   notify: {
     file: {
@@ -98,6 +99,10 @@ Non-empty optional @strong{umask} will force all invoked commands to
 override their umask to specified octal mask. Useful for using with
 @ref{Shared spool, shared spool directories}.
 
+The enabled @strong{noprogress} option disables progress showing for many
+commands by default. You can always force it to be shown with the
+@option{-progress} command line option anyway.
+
 @anchor{CfgNotify}
 @strong{notify} section contains notification settings for successfully
 tossed file, freq and exec packets. Corresponding @strong{from} and
@@ -143,12 +148,12 @@ arguments and the body fed to command's stdin.
 @verb{|echo hello world | nncp-exec OURNODE sendmail ARG0 ARG1 ARG2|}
 command, will execute:
 
-@verbatim
+@example
 NNCP_SELF=OURNODE \
 NNCP_SENDER=REMOTE \
 NNCP_NICE=64 \
 /usr/sbin/sendmail -t ARG0 ARG1 ARG2
-@end verbatim
+@end example
 
 feeding @verb{|hello world\n|} to that started @command{sendmail}
 process.
@@ -230,9 +235,9 @@ users, then you can @command{setgid} it and assure that umask is group
 friendly. For convenience you can set @option{umask} globally for
 invoked NNCP commands in the configuration file. For example:
 
-@verbatim
+@example
 $ chgrp nncp /usr/local/etc/nncp.hjson /var/spool/nncp
 $ chmod g+r /usr/local/etc/nncp.hjson
 $ chmod g+rwxs /var/spool/nncp
 $ echo 'umask: "007"' >> /usr/local/etc/nncp.hjson
-@end verbatim
+@end example
index 32779f60a609973e9689a233ebe6e712609b3bf6..2606b6ee12bd9e59c67726eb57a93c1ec5c0999a 100644 (file)
@@ -34,6 +34,8 @@ Nearly all commands have the following common options:
     Print only errors, omit simple informational messages. In any case
     those messages are logged, so you can reread them using
     @ref{nncp-log} command.
+@item -progress, -noprogress
+    Either force progress showing, or disable it.
 @item -version
     Print version information.
 @item -warranty
@@ -43,11 +45,11 @@ Nearly all commands have the following common options:
 @node nncp-bundle
 @section nncp-bundle
 
-@verbatim
+@example
 $ nncp-bundle [options] -tx [-delete] NODE [NODE ...] > ...
 $ nncp-bundle [options] -rx -delete [-dryrun] [NODE ...] < ...
 $ nncp-bundle [options] -rx [-check] [-dryrun] [NODE ...] < ...
-@end verbatim
+@end example
 
 With @option{-tx} option, this command creates @ref{Bundles, bundle} of
 @ref{Encrypted, encrypted packets} from the spool directory and writes
@@ -78,10 +80,10 @@ with @option{-rx} and @option{-delete} options -- in that mode, stream
 packets integrity will be checked and they will be deleted from the
 spool if everything is good. So it is advisable to recheck your streams:
 
-@verbatim
+@example
 $ nncp-bundle -tx ALICE BOB WHATEVER | cdrecord -tao -
 $ dd if=/dev/cd0 bs=2048 | nncp-bundle -rx -delete
-@end verbatim
+@end example
 
 @option{-dryrun} option prevents any writes to the spool. This is
 useful when you need to see what packets will pass by and possibly check
@@ -90,7 +92,7 @@ their integrity.
 @node nncp-call
 @section nncp-call
 
-@verbatim
+@example
 $ nncp-call [options]
     [-onlinedeadline INT]
     [-maxonlinetime INT]
@@ -100,7 +102,7 @@ $ nncp-call [options]
     [-rxrate INT]
     [-txrate INT]
     NODE[:ADDR] [FORCEADDR]
-@end verbatim
+@end example
 
 Call (connect to) specified @option{NODE} and run @ref{Sync,
 synchronization} protocol with the @ref{nncp-daemon, daemon} on the
@@ -139,9 +141,9 @@ notification.
 @node nncp-caller
 @section nncp-caller
 
-@verbatim
+@example
 $ nncp-caller [options] [NODE ...]
-@end verbatim
+@end example
 
 Croned daemon that calls remote nodes from time to time, according to
 their @ref{CfgCalls, @emph{calls}} configuration field.
@@ -155,10 +157,10 @@ Look @ref{nncp-call} for more information.
 @node nncp-cfgenc
 @section nncp-cfgenc
 
-@verbatim
+@example
 $ nncp-cfgmin [options] [-s INT] [-t INT] [-p INT] cfg.hjson > cfg.hjson.eblob
 $ nncp-cfgmin [options] -d cfg.hjson.eblob > cfg.hjson
-@end verbatim
+@end example
 
 This command allows you to encrypt provided @file{cfg.hjson} file with
 the passphrase, producing @ref{EBlob, eblob}, to safely keep your
@@ -183,21 +185,21 @@ if passphrase can not decrypt @file{eblob}.
 
 @option{-dump} options parses @file{eblob} and prints parameters used
 during its creation. For example:
-@verbatim
+@example
 $ nncp-cfgenc -dump /usr/local/etc/nncp.hjson.eblob
 Strengthening function: Balloon with BLAKE2b-256
 Memory space cost: 1048576 bytes
 Number of rounds: 16
 Number of parallel jobs: 2
 Blob size: 2494
-@end verbatim
+@end example
 
 @node nncp-cfgmin
 @section nncp-cfgmin
 
-@verbatim
+@example
 $ nncp-cfgmin [options] > stripped.hjson
-@end verbatim
+@end example
 
 Print out stripped configuration version: only path to @ref{Spool,
 spool}, path to log file, neighbours public keys are stayed. This is
@@ -207,9 +209,9 @@ neighbours, without private keys involving.
 @node nncp-cfgnew
 @section nncp-cfgnew
 
-@verbatim
+@example
 $ nncp-cfgnew [options] [-nocomments] > new.hjson
-@end verbatim
+@end example
 
 Generate new node configuration: private keys, example configuration
 file and print it to stdout. You must use this command when you setup
@@ -222,9 +224,9 @@ operating system.
 @node nncp-check
 @section nncp-check
 
-@verbatim
+@example
 $ nncp-check [options]
-@end verbatim
+@end example
 
 Perform @ref{Spool, spool} directory integrity check. Read all files
 that has Base32-encoded filenames and compare it with recalculated
@@ -234,9 +236,9 @@ not used often in practice, if ever.
 @node nncp-daemon
 @section nncp-daemon
 
-@verbatim
+@example
 $ nncp-daemon [options] [-maxconn INT] [-bind ADDR] [-inetd]
-@end verbatim
+@end example
 
 Start listening TCP daemon, wait for incoming connections and run
 @ref{Sync, synchronization protocol} with each of them. You can run
@@ -257,9 +259,9 @@ uucp        stream  tcp6    nowait  nncpuser        /usr/local/bin/nncp-daemon      nncp-daemon -inetd
 @node nncp-exec
 @section nncp-exec
 
-@verbatim
+@example
 $ nncp-exec [options] NODE HANDLE [ARG0 ARG1 ...]
-@end verbatim
+@end example
 
 Send execution command to @option{NODE} for specified @option{HANDLE}.
 Body is read from stdin and compressed. After receiving, remote side
@@ -279,13 +281,13 @@ exec: {
 then executing @verb{|echo My message | nncp-exec -replynice 123 REMOTE
 sendmail root@localhost|} will lead to execution of:
 
-@verbatim
+@example
 echo My message |
     NNCP_SELF=REMOTE \
     NNCP_SENDER=OurNodeId \
     NNCP_NICE=123 \
-    /usr/sbin/sendmail -t root@localhost
-@end verbatim
+    /usr/sbin/sendmail -t root@@localhost
+@end example
 
 If @ref{CfgNotify, notification} is enabled on the remote side for exec
 handles, then it will send a simple letter after successful command
@@ -294,9 +296,9 @@ execution with its output in message body.
 @node nncp-file
 @section nncp-file
 
-@verbatim
+@example
 $ nncp-file [options] [-chunked INT] SRC NODE:[DST]
-@end verbatim
+@end example
 
 Send @file{SRC} file to remote @option{NODE}. @file{DST} specifies
 destination file name in remote's @ref{CfgIncoming, incoming}
@@ -342,9 +344,9 @@ file receiving.
 @node nncp-freq
 @section nncp-freq
 
-@verbatim
+@example
 $ nncp-freq [options] NODE:SRC [DST]
-@end verbatim
+@end example
 
 Send file request to @option{NODE}, asking it to send its @file{SRC}
 file from @ref{CfgFreq, freq.path} directory to our node under @file{DST}
@@ -358,45 +360,45 @@ queuing.
 @node nncp-log
 @section nncp-log
 
-@verbatim
+@example
 $ nncp-log [options]
-@end verbatim
+@end example
 
 Parse @ref{Log, log} file and print out its records in human-readable form.
 
 @node nncp-pkt
 @section nncp-pkt
 
-@verbatim
+@example
 $ nncp-pkt [options] < pkt
 $ nncp-pkt [options] [-decompress] -dump < pkt > payload
 $ nncp-pkt -overheads
-@end verbatim
+@end example
 
 Low level packet parser. Normally it should not be used, but can help in
 debugging.
 
 By default it will print packet's type, for example:
-@verbatim
+@example
 Packet type: encrypted
 Niceness: 64
 Sender: 2WHBV3TPZHDOZGUJEH563ZEK7M33J4UESRFO4PDKWD5KZNPROABQ
-@end verbatim
+@end example
 
 If you specify @option{-dump} option and provide an @ref{Encrypted,
 encrypted} packet, then it will verify and decrypt it to stdout.
 Encrypted packets contain @ref{Plain, plain} ones, that also can be fed
 to @command{nncp-pkt}:
 
-@verbatim
+@example
 Packet type: plain
 Payload type: transitional
 Path: VHMTRWDOXPLK7BR55ICZ5N32ZJUMRKZEMFNGGCEAXV66GG43PEBQ
 
 Packet type: plain
 Payload type: mail
-Path: stargrave@stargrave.org
-@end verbatim
+Path: stargrave@@stargrave.org
+@end example
 
 And with the @option{-dump} option it will give you the actual payload
 (the whole file, mail message, and so on). @option{-decompress} option
@@ -408,10 +410,10 @@ packets).
 @node nncp-reass
 @section nncp-reass
 
-@verbatim
+@example
 $ nncp-reass [options] [-dryrun] [-keep] [-dump] [-stdout] FILE.nncp.meta
-$ nncp-reass [options] [-dryrun] [-keep] {-all | -node NODE}
-@end verbatim
+$ nncp-reass [options] [-dryrun] [-keep] @{-all | -node NODE@}
+@end example
 
 Reassemble @ref{Chunked, chunked file} after @ref{nncp-toss, tossing}.
 
@@ -450,7 +452,7 @@ and/or separate storage device for higher performance.
 
 @option{-dump} option prints meta-file contents in human-friendly form.
 It is useful mainly for debugging purposes. For example:
-@verbatim
+@example
 Original filename: testfile
 File size: 3.8 MiB (3987795 bytes)
 Chunk size: 1.0 MiB (1048576 bytes)
@@ -460,21 +462,21 @@ Checksums:
     1: 013a07e659f2e353d0e4339c3375c96c7fffaa2fa00875635f440bbc4631052a
     2: f4f883975a663f2252328707a30e71b2678f933b2f3103db8475b03293e4316e
     3: 0e9e229501bf0ca42d4aa07393d19406d40b179f3922a3986ef12b41019b45a3
-@end verbatim
+@end example
 
  Do not forget about @ref{ChunkedZFS, possible} ZFS deduplication issues.
 
 @node nncp-rm
 @section nncp-rm
 
-@verbatim
+@example
 $ nncp-rm [options] -tmp
 $ nncp-rm [options] -lock
 $ nncp-rm [options] -node NODE -part
 $ nncp-rm [options] -node NODE -seen
 $ nncp-rm [options] -node NODE [-rx] [-tx]
 $ nncp-rm [options] -node NODE -pkt PKT
-@end verbatim
+@end example
 
 This command is aimed to delete various files from your spool directory:
 
@@ -497,9 +499,9 @@ ones. If @option{-seen} option is specified, then delete only
 @node nncp-stat
 @section nncp-stat
 
-@verbatim
+@example
 $ nncp-stat [options] [-node NODE]
-@end verbatim
+@end example
 
 Print current @ref{Spool, spool} statistics about unsent and unprocessed
 packets. For each node (unless @option{-node} specified) and each
@@ -509,7 +511,7 @@ size) are in inbound (Rx) and outbound (Tx) queues.
 @node nncp-toss
 @section nncp-toss
 
-@verbatim
+@example
 $ nncp-toss [options]
     [-node NODE]
     [-dryrun]
@@ -519,7 +521,7 @@ $ nncp-toss [options]
     [-nofreq]
     [-noexec]
     [-notrns]
-@end verbatim
+@end example
 
 Perform "tossing" operation on all inbound packets. This is the tool
 that decrypts all packets and processes all payload packets in them:
@@ -545,9 +547,9 @@ options allow to disable any kind of packet types processing.
 @node nncp-xfer
 @section nncp-xfer
 
-@verbatim
+@example
 $ nncp-xfer [options] [-node NODE] [-mkdir] [-keep] [-rx|-tx] DIR
-@end verbatim
+@end example
 
 Search for directory in @file{DIR} containing inbound packets for us and
 move them to local @ref{Spool, spool} directory. Also search for known
index 7997689bb76253073160a608e200758419a75258..6282b292416741309e77c760accef5eb41ebad0b 100644 (file)
@@ -12,6 +12,7 @@ Tarballs include all necessary required libraries:
 @item @code{github.com/dustin/go-humanize} @tab MIT
 @item @code{github.com/flynn/noise} @tab BSD 3-Clause
 @item @code{github.com/gorhill/cronexpr} @tab GNU GPLv3
+@item @code{github.com/gosuri/uilive} @tab MIT
 @item @code{github.com/hjson/hjson-go} @tab MIT
 @item @code{github.com/klauspost/compress} @tab BSD 3-Clause
 @item @code{go.cypherpunks.ru/balloon} @tab GNU LGPLv3
@@ -23,6 +24,10 @@ Tarballs include all necessary required libraries:
 @multitable {XXXXX} {XXXX-XX-XX} {XXXX KiB} {link sign} {xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx}
 @headitem Version @tab Date @tab Size @tab Tarball @tab SHA256 checksum
 
+@item @ref{Release 5.1.2, 5.1.2} @tab 2019-12-13 @tab 1106 KiB
+@tab @url{download/nncp-5.1.2.tar.xz, link} @url{download/nncp-5.1.2.tar.xz.sig, sign}
+@tab @code{52B2043B 1B22D20F C44698EC AFE5FF46 F99B4DD5 2C392D4D 25FE1580 993263B3}
+
 @item @ref{Release 5.1.1, 5.1.1} @tab 2019-12-01 @tab 1103 KiB
 @tab @url{download/nncp-5.1.1.tar.xz, link} @url{download/nncp-5.1.1.tar.xz.sig, sign}
 @tab @code{B9537678 E5B549BA 6FA0D20D 41B2D4A9 4ED31F2C AB9FAF63 A388D95E 7662A93F}
index a83ea31be352e50881770dd47b23e6262883cb4d..56480cf374f0afc034dcbdda455ff9b91db4dd37 100644 (file)
@@ -2,6 +2,8 @@
 @documentencoding UTF-8
 @settitle NNCP
 
+@set VERSION 5.2.0
+
 @copying
 This manual is for NNCP (Node to Node copy) --  collection of utilities
 simplifying secure store-and-forward files and mail exchanging.
index 8240e2fb903ff67e9dd8c5923c5e8fef79d31f5a..a8a8b9ea4cc606d73a1fb7df2e9dac25a85f1a27 100644 (file)
@@ -23,13 +23,13 @@ want to freq from. Because files can be updated there. It is useful to
 run cron-ed job on it to create files listing you can freq and search
 for files in it:
 
-@verbatim
+@example
 0  4  *  *  *  cd /storage ; tmp=`mktemp` ; \
     tree -f -h -N --du --timefmt \%Y-\%m-\%d |
     zstdmt -19 > $tmp && chmod 644 $tmp && mv $tmp TREE.txt.zst ; \
     tree -J -f --timefmt \%Y-\%m-\%d |
     zstdmt -19 > $tmp && chmod 644 $tmp && mv $tmp TREE.json.zst
-@end verbatim
+@end example
 
 @node Postfix
 @section Integration with Postfix
@@ -51,11 +51,11 @@ the Postfix @command{sendmail} command.
 
 @item Define a @command{pipe(8)} based mail delivery transport for
 delivery via NNCP:
-@verbatim
+@example
 /usr/local/etc/postfix/master.cf:
 nncp      unix  -       n       n       -       -       pipe
           flags=F user=nncp argv=nncp-exec -quiet $nexthop sendmail $recipient
-@end verbatim
+@end example
 
 This runs the @command{nncp-exec} command to place outgoing mail into
 the NNCP queue after replacing @var{$nexthop} by the receiving NNCP
@@ -67,11 +67,11 @@ shell meta characters in command-line parameters.
 @item Specify that mail for @emph{example.com}, should be delivered via
 NNCP, to a host named @emph{nncp-host}:
 
-@verbatim
+@example
 /usr/local/etc/postfix/transport:
     example.com     nncp:nncp-host
     .example.com    nncp:nncp-host
-@end verbatim
+@end example
 
 See the @command{transport(5)} manual page for more details.
 
@@ -80,18 +80,18 @@ whenever you change the @file{transport} file.
 
 @item Enable @file{transport} table lookups:
 
-@verbatim
+@example
 /usr/local/etc/postfix/main.cf:
     transport_maps = hash:$config_directory/transport
-@end verbatim
+@end example
 
 @item Add @emph{example.com} to the list of domains that your site is
 willing to relay mail for.
 
-@verbatim
+@example
 /usr/local/etc/postfix/main.cf:
     relay_domains = example.com ...other relay domains...
-@end verbatim
+@end example
 
 See the @option{relay_domains} configuration parameter description for
 details.
@@ -114,27 +114,27 @@ the Postfix @command{sendmail} command.
 @item Specify that all remote mail must be sent via the @command{nncp}
 mail transport to your NNCP gateway host, say, @emph{nncp-gateway}:
 
-@verbatim
+@example
 /usr/local/etc/postfix/main.cf:
     relayhost = nncp-gateway
     default_transport = nncp
-@end verbatim
+@end example
 
 Postfix 2.0 and later also allows the following more succinct form:
 
-@verbatim
+@example
 /usr/local/etc/postfix/main.cf:
     default_transport = nncp:nncp-gateway
-@end verbatim
+@end example
 
 @item Define a @command{pipe(8)} based message delivery transport for
 mail delivery via NNCP:
 
-@verbatim
+@example
 /usr/local/etc/postfix/master.cf:
 nncp      unix  -       n       n       -       -       pipe
           flags=F user=nncp argv=nncp-exec -quiet $nexthop sendmail $recipient
-@end verbatim
+@end example
 
 This runs the @command{nncp-exec} command to place outgoing mail into
 the NNCP queue. It substitutes the hostname (@emph{nncp-gateway}, or
@@ -159,33 +159,40 @@ program supports ETags and won't pollute the channel if remote server
 supports them too.
 
 After installing @command{rss2email}, create configuration file:
-@verbatim
-$ r2e new rss-robot@address.com
-@end verbatim
+
+@example
+$ r2e new rss-robot@@address.com
+@end example
+
 and add feeds you want to retrieve:
-@verbatim
+
+@example
 $ r2e add https://git.cypherpunks.ru/cgit.cgi/nncp.git/atom/?h=master
-@end verbatim
+@end example
+
 and run the process:
-@verbatim
+
+@example
 $ r2e run
-@end verbatim
+@end example
 
 @node WARCs
 @section Integration with Web pages
 
 Simple HTML web page can be downloaded very easily for sending and
 viewing it offline after:
-@verbatim
+
+@example
 $ wget http://www.example.com/page.html
-@end verbatim
+@end example
 
 But most web pages contain links to images, CSS and JavaScript files,
 required for complete rendering.
 @url{https://www.gnu.org/software/wget/, GNU Wget} supports that
 documents parsing and understanding page dependencies. You can download
 the whole page with dependencies the following way:
-@verbatim
+
+@example
 $ wget \
     --page-requisites \
     --convert-links \
@@ -195,19 +202,22 @@ $ wget \
     --random-wait \
     --execute robots=off \
     http://www.example.com/page.html
-@end verbatim
+@end example
+
 that will create @file{www.example.com} directory with all files
 necessary to view @file{page.html} web page. You can create single file
 compressed tarball with that directory and send it to remote node:
-@verbatim
+
+@example
 $ tar cf - www.example.com | zstd |
     nncp-file - remote.node:www.example.com-page.tar.zst
-@end verbatim
+@end example
 
 But there are multi-paged articles, there are the whole interesting
 sites you want to get in a single package. You can mirror the whole web
 site by utilizing @command{wget}'s recursive feature:
-@verbatim
+
+@example
 $ wget \
     --recursive \
     --timestamping \
@@ -216,20 +226,22 @@ $ wget \
     --no-parent \
     [...]
     http://www.example.com/
-@end verbatim
+@end example
 
 There is a standard for creating
 @url{https://en.wikipedia.org/wiki/Web_ARChive, Web ARChives}:
 @strong{WARC}. Fortunately again, @command{wget} supports it as an
 output format.
-@verbatim
+
+@example
 $ wget \
     --warc-file www.example_com-$(date '+%Y%M%d%H%m%S') \
     --no-warc-compression \
     --no-warc-keep-log \
     [...]
     http://www.example.com/
-@end verbatim
+@end example
+
 That command will create uncompressed @file{www.example_com-XXX.warc}
 web archive. By default, WARCs are compressed using
 @url{https://en.wikipedia.org/wiki/Gzip, gzip}, but, in example above,
@@ -241,12 +253,13 @@ There are plenty of software acting like HTTP proxy for your browser,
 allowing to view that WARC files. However you can extract files from
 that archive using @url{https://pypi.python.org/pypi/Warcat, warcat}
 utility, producing usual directory hierarchy:
-@verbatim
+
+@example
 $ python3 -m warcat extract \
     www.example_com-XXX.warc \
     --output-dir www.example.com-XXX \
     --progress
-@end verbatim
+@end example
 
 @node BitTorrent
 @section BitTorrent and huge files
@@ -267,12 +280,14 @@ connections.
 
 You can queue your files after they are completely downloaded.
 @file{aria2-downloaded.sh} contents:
+
 @verbatiminclude aria2-downloaded.sh
 
 Also you can prepare
 @url{http://aria2.github.io/manual/en/html/aria2c.html#files, input file}
 with the jobs you want to download:
-@verbatim
+
+@example
 $ cat jobs
 http://www.nncpgo.org/download/nncp-0.11.tar.xz
     out=nncp.txz
@@ -281,7 +296,8 @@ http://www.nncpgo.org/download/nncp-0.11.tar.xz.sig
 $ aria2c \
     --on-download-complete aria2-downloaded.sh \
     --input-file jobs
-@end verbatim
+@end example
+
 and all that downloaded (@file{nncp.txz}, @file{nncp.txz.sig}) files
 will be sent to @file{remote.node} when finished.
 
@@ -305,15 +321,17 @@ exec: {
 @end verbatim
 
 @file{warcer.sh} contents:
+
 @verbatiminclude warcer.sh
 
 @file{wgeter.sh} contents:
+
 @verbatiminclude wgeter.sh
 
 Now you can queue that node to send you some website's page, file or
 BitTorrents:
 
-@verbatim
+@example
 $ echo http://www.nncpgo.org/Postfix.html |
     nncp-exec remote.node warcer postfix-whole-page
 $ echo http://www.nncpgo.org/Postfix.html |
@@ -322,7 +340,7 @@ $ echo \
     http://www.nncpgo.org/download/nncp-0.11.tar.xz
     http://www.nncpgo.org/download/nncp-0.11.tar.xz.sig |
     nncp-exec remote.node aria2c
-@end verbatim
+@end example
 
 @node Git
 @section Integration with Git
@@ -333,32 +351,37 @@ necessary tools for store-and-forward networking.
 everything you need.
 
 Use it to create bundles containing all required blobs/trees/commits and tags:
-@verbatim
+
+@example
 $ git bundle create repo-initial.bundle master --tags --branches
 $ git tag -f last-bundle
 $ nncp-file repo-initial.bundle remote.node:repo-$(date '+%Y%m%d%H%M%S').bundle
-@end verbatim
+@end example
 
 Do usual working with the Git: commit, add, branch, checkout, etc. When
 you decide to queue your changes for sending, create diff-ed bundle and
 transfer them:
-@verbatim
+
+@example
 $ git bundle create repo-$(date '+%Y%M%d%H%m%S').bundle last-bundle..master
 or maybe
 $ git bundle create repo-$(date '+%Y%M%d').bundle --since=10.days master
-@end verbatim
+@end example
 
 Received bundle on remote machine acts like usual remote:
-@verbatim
+
+@example
 $ git clone -b master repo-XXX.bundle
-@end verbatim
+@end example
+
 overwrite @file{repo.bundle} file with newer bundles you retrieve and
 fetch all required branches and commits:
-@verbatim
+
+@example
 $ git pull # assuming that origin remote points to repo.bundle
 $ git fetch repo.bundle master:localRef
 $ git ls-remote repo.bundle
-@end verbatim
+@end example
 
 Bundles are also useful when cloning huge repositories (like Linux has).
 Git's native protocol does not support any kind of interrupted download
@@ -369,7 +392,9 @@ bundle, you can add an ordinary @file{git://} remote and fetch the
 difference.
 
 Also you can find the following exec-handler useful:
+
 @verbatiminclude git-bundler.sh
+
 And it allows you to request for bundles like that:
 @code{echo some-old-commit..master | nncp-exec REMOTE bundler REPONAME}.
 
@@ -383,8 +408,9 @@ many of them are supported, including @emph{Dailymotion}, @emph{Vimeo}
 and @emph{YouTube}.
 
 When your multimedia becomes an ordinary file, you can transfer it easily.
-@verbatim
+
+@example
 $ youtube-dl \
-    --exec 'nncp-file {} remote.node:' \
+    --exec 'nncp-file @{@} remote.node:' \
     'https://www.youtube.com/watch?list=PLd2Cw8x5CytxPAEBwzilrhQUHt_UN10FJ'
-@end verbatim
+@end example
index faea7c49ba6219aaf46693227e48a8da05db16fa..1752e9d9206367df13d4951e913d2f5f7fd7d5ce 100644 (file)
@@ -19,10 +19,10 @@ uid   NNCP releases <releases at nncpgo dot org>
 @itemize
 
 @item
-@verbatim
+@example
 $ gpg --auto-key-locate dane --locate-keys releases at nncpgo dot org
 $ gpg --auto-key-locate wkd --locate-keys releases at nncpgo dot org
-@end verbatim
+@end example
 
 @item
 @verbatiminclude .well-known/openpgpkey/hu/i4cdqgcarfjdjnba6y4jnf498asg8c6p.asc
@@ -30,6 +30,7 @@ $ gpg --auto-key-locate wkd --locate-keys releases at nncpgo dot org
 @end itemize
 
 Then you could verify tarballs signature:
-@verbatim
-$ gpg --verify nncp-5.1.2.tar.xz.sig nncp-5.1.2.tar.xz
-@end verbatim
+
+@example
+$ gpg --verify nncp-@value{VERSION}.tar.xz.sig nncp-@value{VERSION}.tar.xz
+@end example
index 237af0fbde01a220a31ce2ae776df8d2d1e3e7df..fa95bf5c9eeaac3054d8d5ec2cb748b0aa4bef37 100644 (file)
@@ -1,6 +1,26 @@
 @node Новости
 @section Новости
 
+@node Релиз 5.2.0
+@subsection Релиз 5.2.0
+@itemize
+
+@item
+Большинство команд по умолчанию показывают однострочный прогресс
+выполнения операции. Появились @option{-progress}, @option{-noprogress}
+опции командной строки, @option{noprogress} опция конфигурационного
+файла.
+
+@item
+Исправлен некорректный код возврата @command{nncp-check} команды,
+который возвращал ошибку когда всё хорошо.
+
+@item
+Проверка свободного места для пакетов, во время выполнения
+@command{nncp-bundle -rx}.
+
+@end itemize
+
 @node Релиз 5.1.2
 @subsection Релиз 5.1.2
 @itemize
index 62759802b3263c52e89d50c7ccfea40450d9c356..461355755cd5b498f507e3768d5acc9a3ea83730 100644 (file)
@@ -3,6 +3,24 @@
 
 See also this page @ref{Новости, on russian}.
 
+@node Release 5.2.0
+@section Release 5.2.0
+@itemize
+
+@item
+Most commands by default show a one-line operation progress.
+@option{-progress}, @option{-noprogress} command line options,
+@option{noprogress} configuration file option appeared.
+
+@item
+Fixed the incorrect @command{nncp-check} command return code, which
+returned an error code even when everything was good.
+
+@item
+Free disk space check during @command{nncp-bundle -rx} call.
+
+@end itemize
+
 @node Release 5.1.2
 @section Release 5.1.2
 @itemize
index 2ded27ca827a3286b2eac472973d23b2115498c4..1cc1ea1b84fec00ddf23c9d484245da1695aff91 100644 (file)
@@ -6,11 +6,11 @@ be buggy. It does not contain compiled documentation and dependent
 libraries source code. Because of that, it is recommended for porters
 to use @ref{Tarballs, tarballs} instead.
 
-@verbatim
+@example
 $ git clone git://git.cypherpunks.ru/nncp.git
 $ cd nncp
 $ git checkout develop
-@end verbatim
+@end example
 
 Also there is mirror of dependent libraries for safety if their native
 repositories will be unavailable (they are seldom updated):
index 239b87f1eb6c0d995caa606a88f196a8114e15a6..d6be2d927f7977aca6b750a2c22b2152ea6b1179 100644 (file)
@@ -54,6 +54,7 @@ just an unsigned integer telling what body structure follows.
     Actually @emph{HALT} packet does not have any body, only the header
     with the type. It is also used in the first payload for padding to
     the maximum size.
+
 @verbatim
 +------+
 | HALT |
@@ -62,11 +63,13 @@ just an unsigned integer telling what body structure follows.
 
 @item INFO
     Information about the file we have for transmission.
+
 @verbatim
 +------+--------------------+
 | INFO | NICE | SIZE | HASH |
 +------+--------------------+
 @end verbatim
+
     @multitable @columnfractions 0.2 0.3 0.5
     @headitem @tab XDR type @tab Value
     @item Niceness @tab
@@ -83,11 +86,13 @@ just an unsigned integer telling what body structure follows.
 @item FREQ
     File transmission request. Ask remote side to queue the file for
     transmission.
+
 @verbatim
 +------+---------------+
 | FREQ | HASH | OFFSET |
 +------+---------------+
 @end verbatim
+
     @multitable @columnfractions 0.2 0.3 0.5
     @headitem @tab XDR type @tab Value
     @item Hash @tab
@@ -100,11 +105,13 @@ just an unsigned integer telling what body structure follows.
 
 @item FILE
     Chunk of file.
+
 @verbatim
 +------+-------------------------+
 | FILE | HASH | OFFSET | PAYLOAD |
 +------+-------------------------+
 @end verbatim
+
     @multitable @columnfractions 0.2 0.3 0.5
     @headitem @tab XDR type @tab Value
     @item Hash @tab
@@ -120,11 +127,13 @@ just an unsigned integer telling what body structure follows.
 
 @item DONE
     Signal remote side that we have successfully downloaded the file.
+
 @verbatim
 +------+------+
 | DONE | HASH |
 +------+------+
 @end verbatim
+
     @multitable @columnfractions 0.2 0.3 0.5
     @headitem @tab XDR type @tab Value
     @item Hash @tab
index c5c516de71ff198098f35e5aeafddac71401ccb0..98532ae803597b7784ec5eb8c9f0ae76cd4b6350 100644 (file)
@@ -5,7 +5,7 @@ Spool directory holds @ref{Encrypted, encrypted packets} received from
 remote nodes and queued for sending to them. It has the following
 example structure:
 
-@verbatim
+@example
 spool/tmp/
 spool/2WHB...OABQ/rx.lock
 spool/2WHB...OABQ/rx/5ZIB...UMKW.part
@@ -17,7 +17,7 @@ spool/BYRR...CG6Q/tx/AQUT...DGNT.seen
 spool/BYRR...CG6Q/tx/NSYY...ZUU6
 spool/BYRR...CG6Q/tx/VCSR...3VXX.seen
 spool/BYRR...CG6Q/tx/ZI5U...5RRQ
-@end verbatim
+@end example
 
 Except for @file{tmp}, all other directories are Base32-encoded node
 identifiers (@file{2WHB...OABQ}, @file{BYRR...CG6Q} in our example).
index 677d2ac524430804106ae31efa563f1dd9af991d..9e7e1780713c0a0fc089af373b135c51d762f59b 100644 (file)
@@ -79,10 +79,10 @@ IMAP4, как правило, нет). У вас легковесный, сжа
 
 Команды:
 
-@verbatim
+@example
 $ nncp-file file_i_want_to_send bob:
 $ nncp-file another_file bob:movie.avi
-@end verbatim
+@end example
 
 добавят в очередь отправки два файла для узла @emph{bob}.
 Выстрелил-и-забыл! Теперь это работа демона (или offline передачи)
@@ -105,12 +105,12 @@ NNCP поддерживает @ref{Niceness, приоритезацию траф
 раньше или позднее остальных. Почти все команды имеют соответствующую
 опцию:
 
-@verbatim
+@example
 $ nncp-file -nice FLASH myfile node:dst
 $ nncp-xfer -nice PRIORITY /mnt/shared
 $ nncp-call -nice NORMAL bob
 [...]
-@end verbatim
+@end example
 
 Огромные файлы могут быть разбиты на маленькие @ref{Chunked, части},
 давая возможность передачи, по сути, любых объёмов используя накопители
@@ -118,10 +118,10 @@ $ nncp-call -nice NORMAL bob
 
 Вы также можете использовать CD-ROM и ленточные накопители:
 
-@verbatim
+@example
 $ nncp-bundle -tx bob | cdrecord -tao -
 $ nncp-bundle -tx bob | dd of=/dev/sa0 bs=10240
-@end verbatim
+@end example
 
 @node UsecaseNoLinkRU
 @subsection Экстремальные наземные окружающие условия, нет связи
@@ -135,9 +135,9 @@ $ nncp-bundle -tx bob | dd of=/dev/sa0 bs=10240
 устройство (SD гораздо предпочтительнее!) хранения, подмонтируйте и
 запустите @ref{nncp-xfer}:
 
-@verbatim
+@example
 $ nncp-xfer -node bob /media/usbstick
-@end verbatim
+@end example
 
 чтобы скопировать все исходящие пакеты относящиеся к @emph{bob}.
 Используйте @option{-mkdir} опцию чтобы создать все необходимые
@@ -148,16 +148,16 @@ $ nncp-xfer -node bob /media/usbstick
 @emph{bob} и к @emph{alice}, то тогда просто не указывайте
 @option{-node} опцию, чтобы скопировать все доступные исходящие пакеты.
 
-@verbatim
+@example
 $ nncp-xfer /media/usbstick
-@end verbatim
+@end example
 
 Размонтируйте и передайте накопитель Бобу и Алисе. Когда они вставят
 накопитель в свои компьютеры, то выполнят точно такую же команду:
 
-@verbatim
+@example
 $ nncp-xfer /media/usbstick
-@end verbatim
+@end example
 
 чтобы найти все пакеты относящиеся к их узлу и локально скопируют для
 дальнейшей обработки. @command{nncp-xfer} это единственная команда
@@ -175,10 +175,10 @@ $ nncp-xfer /media/usbstick
 отсылать их. Они -- всего-лишь последовательность @ref{Encrypted,
 зашифрованных пакетов}, которые вы можете принять.
 
-@verbatim
+@example
 $ nncp-bundle -tx alice bob eve ... | команда для отправки широковещательной рассылки
 $ команда для приёма широковещательной рассылки | nncp-bundle -rx
-@end verbatim
+@end example
 
 Встроенная возможность определять дубляжи пакетов позволит вам
 переотправлять широковещательные рассылки время от времени, повышая
@@ -243,15 +243,17 @@ $ команда для приёма широковещательной расс
 отослать полезную нагрузку сразу же в самом первом пакете) безопасный
 транспорт с свойством совершенной прямой секретности.
 
-@verbatim
-$ nncp-daemon -bind [::]:5400
-@end verbatim
+@example
+$ nncp-daemon -bind "[::]":5400
+@end example
+
 запустит TCP демон, который будет слушать входящие соединения на всех
 интерфейсах.
 
-@verbatim
+@example
 $ nncp-call bob
-@end verbatim
+@end example
+
 попытается подключиться к известному TCP-адресу узла @emph{bob} (взятого
 из конфигурационного файла), послать все связанные с ним исходящие
 пакеты и получить от него. Все прерванные передачи будут автоматически
index 113089dfa23383feb0c4a79813bea7c1988d2cc5..e118da4ea8088858f4931b3c85de041e7e37a1c7 100644 (file)
@@ -77,10 +77,10 @@ daemon}.
 
 The command:
 
-@verbatim
+@example
 $ nncp-file file_i_want_to_send bob:
 $ nncp-file another_file bob:movie.avi
-@end verbatim
+@end example
 
 will queue two files for sending to @emph{bob} node. Fire and forget!
 Now this is daemon's job (or offline transfer) to send this files part
@@ -100,12 +100,12 @@ NNCP allows traffic @ref{Niceness, prioritizing}: each packet has
 niceness level, that will guarantee that it will be processed earlier or
 later than the other ones. Nearly all commands has corresponding option:
 
-@verbatim
+@example
 $ nncp-file -nice FLASH myfile node:dst
 $ nncp-xfer -nice PRIORITY /mnt/shared
 $ nncp-call -nice NORMAL bob
 [...]
-@end verbatim
+@end example
 
 Huge files could be split on smaller @ref{Chunked, chunks}, giving
 possibility to transfer virtually any volumes using small capacity
@@ -113,10 +113,10 @@ storages.
 
 You can also use CD-ROM and tape drives:
 
-@verbatim
+@example
 $ nncp-bundle -tx bob | cdrecord -tao -
 $ nncp-bundle -tx bob | dd of=/dev/sa0 bs=10240
-@end verbatim
+@end example
 
 @node UsecaseNoLink
 @section Extreme terrestrial environments, no link
@@ -128,9 +128,9 @@ media for transferring packets to other nodes.
 Assume that you send two files to @emph{bob} node. Insert USB storage
 device (SD is preferable!), mount it and run @ref{nncp-xfer}:
 
-@verbatim
+@example
 $ nncp-xfer -node bob /media/usbstick
-@end verbatim
+@end example
 
 to copy all outbound packets related to @emph{bob}. Use @option{-mkdir}
 option to create related directory on USB/SD storage if they are missing
@@ -140,16 +140,16 @@ If you use single storage device to transfer data both to @emph{bob} and
 @emph{alice}, then just omit @option{-node} option to copy all available
 outgoing packets.
 
-@verbatim
+@example
 $ nncp-xfer /media/usbstick
-@end verbatim
+@end example
 
 Unmount it and transfer storage to Bob and Alice. When they will insert
 it in their computers, they will use exactly the same command:
 
-@verbatim
+@example
 $ nncp-xfer /media/usbstick
-@end verbatim
+@end example
 
 to find all packets related to their node and copy them locally for
 further processing. @command{nncp-xfer} is the only command used with
@@ -165,10 +165,10 @@ example, satellite's broadcasting signal. You are not able to use online
 You can use @ref{Bundles, bundles} and stream them above. They are just
 a sequence of @ref{Encrypted, encrypted packets} you can catch on.
 
-@verbatim
+@example
 $ nncp-bundle -tx alice bob eve ... | command to send broadcast
 $ command to receive broadcast | nncp-bundle -rx
-@end verbatim
+@end example
 
 With built-in packet duplicates detection ability, you can retransmit
 your broadcasts from time to time, to increase chances the recipient
@@ -229,15 +229,17 @@ authenticate peers and provide effective (both participants send payload
 in the very first packet) secure transport with forward secrecy
 property.
 
-@verbatim
-$ nncp-daemon -bind [::]:5400
-@end verbatim
+@example
+$ nncp-daemon -bind "[::]":5400
+@end example
+
 will start TCP daemon listening on all interfaces for incoming
 connections.
 
-@verbatim
+@example
 $ nncp-call bob
-@end verbatim
+@end example
+
 will try to connect to @emph{bob}'s node known TCP addresses (taken from
 configuration file) and send all related outbound packets and retrieve
 those the Bob has. All interrupted transfers will be automatically
index e010e46a0acf0b8a4589e430905b566369f24096..31bf18aafba1b8e20c2d732497f6af2538351e39 100644 (file)
@@ -1,7 +1,7 @@
 # $FreeBSD: head/net/nncp/Makefile 517819 2019-11-17 11:51:56Z dmgk $
 
 PORTNAME=      nncp
-DISTVERSION=   5.1.2
+DISTVERSION=   5.2.0
 CATEGORIES=    net
 MASTER_SITES=  http://www.nncpgo.org/download/
 
index fd57bd1f283eaca0985adfc0e1bb8c4b9ff4ef01..034ba286884496da6f2a7384ef6c18e0652ded22 100644 (file)
@@ -19,7 +19,6 @@ package nncp
 
 import (
        "net"
-       "strconv"
 
        "github.com/gorhill/cronexpr"
 )
@@ -77,17 +76,17 @@ func (ctx *Ctx) CallNode(
                        state.Wait()
                        ctx.LogI("call-finish", SDS{
                                "node":     state.Node.Id,
-                               "duration": strconv.FormatInt(int64(state.Duration.Seconds()), 10),
-                               "rxbytes":  strconv.FormatInt(state.RxBytes, 10),
-                               "txbytes":  strconv.FormatInt(state.TxBytes, 10),
-                               "rxspeed":  strconv.FormatInt(state.RxSpeed, 10),
-                               "txspeed":  strconv.FormatInt(state.TxSpeed, 10),
+                               "duration": int64(state.Duration.Seconds()),
+                               "rxbytes":  state.RxBytes,
+                               "txbytes":  state.TxBytes,
+                               "rxspeed":  state.RxSpeed,
+                               "txspeed":  state.TxSpeed,
                        }, "")
                        isGood = true
                        conn.Close()
                        break
                } else {
-                       ctx.LogE("call-start", SdsAdd(sds, SDS{"err": err}), "")
+                       ctx.LogE("call-start", sds, err, "")
                        conn.Close()
                }
        }
index edc132a37ffc403a09dd55221871ceb07fb15a27..615e382410a6f52bd067e40717356a3b9c6e0950 100644 (file)
@@ -108,6 +108,8 @@ type CfgJSON struct {
        Log   string `json:"log"`
        Umask string `json:"umask",omitempty`
 
+       OmitPrgrs bool `json:"noprogress",omitempty`
+
        Notify *NotifyJSON `json:"notify,omitempty"`
 
        Self  *NodeOurJSON        `json:"self"`
@@ -423,10 +425,15 @@ func CfgParse(data []byte) (*Ctx, error) {
                rInt := int(r)
                umaskForce = &rInt
        }
+       showPrgrs := true
+       if cfgJSON.OmitPrgrs {
+               showPrgrs = false
+       }
        ctx := Ctx{
                Spool:      spoolPath,
                LogPath:    logPath,
                UmaskForce: umaskForce,
+               ShowPrgrs:  showPrgrs,
                Self:       self,
                Neigh:      make(map[NodeId]*Node, len(cfgJSON.Neigh)),
                Alias:      make(map[string]*NodeId),
index fdd6436f9049806ebe46d054a609f6b0839ccf33..4c1f542452420bda9a3446208c2dfc4b4a0a499c 100644 (file)
@@ -20,46 +20,47 @@ package nncp
 import (
        "bufio"
        "bytes"
+       "errors"
        "io"
        "log"
 
        "golang.org/x/crypto/blake2b"
 )
 
-func Check(src io.Reader, checksum []byte) (bool, error) {
+func Check(src io.Reader, checksum []byte, sds SDS, showPrgrs bool) (bool, error) {
        hsh, err := blake2b.New256(nil)
        if err != nil {
                log.Fatalln(err)
        }
-       if _, err = io.Copy(hsh, bufio.NewReader(src)); err != nil {
+       if _, err = CopyProgressed(hsh, bufio.NewReader(src), sds, showPrgrs); err != nil {
                return false, err
        }
        return bytes.Compare(hsh.Sum(nil), checksum) == 0, nil
 }
 
-func (ctx *Ctx) checkXx(nodeId *NodeId, xx TRxTx) bool {
+func (ctx *Ctx) checkXxIsBad(nodeId *NodeId, xx TRxTx) bool {
        isBad := false
        for job := range ctx.Jobs(nodeId, xx) {
                sds := SDS{
-                       "xx":   string(xx),
-                       "node": nodeId,
-                       "pkt":  ToBase32(job.HshValue[:]),
+                       "xx":       string(xx),
+                       "node":     nodeId,
+                       "pkt":      ToBase32(job.HshValue[:]),
+                       "fullsize": job.Size,
                }
-               ctx.LogP("check", sds, "")
-               gut, err := Check(job.Fd, job.HshValue[:])
+               gut, err := Check(job.Fd, job.HshValue[:], sds, ctx.ShowPrgrs)
                job.Fd.Close()
                if err != nil {
-                       ctx.LogE("check", SdsAdd(sds, SDS{"err": err}), "")
-                       return false
+                       ctx.LogE("check", sds, err, "")
+                       return true
                }
                if !gut {
                        isBad = true
-                       ctx.LogE("check", sds, "bad")
+                       ctx.LogE("check", sds, errors.New("bad"), "")
                }
        }
        return isBad
 }
 
 func (ctx *Ctx) Check(nodeId *NodeId) bool {
-       return ctx.checkXx(nodeId, TRx) || ctx.checkXx(nodeId, TTx)
+       return !(ctx.checkXxIsBad(nodeId, TRx) || ctx.checkXxIsBad(nodeId, TTx))
 }
index 60bb1130c83d09559084b633d7f5c3f9cea1c5d7..0bca23f69c1ad5d502c05188ed416823b7079f91 100644 (file)
@@ -22,6 +22,7 @@ import (
        "archive/tar"
        "bufio"
        "bytes"
+       "errors"
        "flag"
        "fmt"
        "io"
@@ -29,7 +30,6 @@ import (
        "log"
        "os"
        "path/filepath"
-       "strconv"
        "strings"
 
        xdr "github.com/davecgh/go-xdr/xdr2"
@@ -63,6 +63,8 @@ func main() {
                spoolPath = flag.String("spool", "", "Override path to spool")
                logPath   = flag.String("log", "", "Override path to logfile")
                quiet     = flag.Bool("quiet", false, "Print only errors")
+               showPrgrs = flag.Bool("progress", false, "Force progress showing")
+               omitPrgrs = flag.Bool("noprogress", false, "Omit progress showing")
                debug     = flag.Bool("debug", false, "Print debug messages")
                version   = flag.Bool("version", false, "Print version information")
                warranty  = flag.Bool("warranty", false, "Print warranty information")
@@ -88,7 +90,15 @@ func main() {
                log.Fatalln("At least one of -rx and -tx must be specified")
        }
 
-       ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, *logPath, *quiet, *debug)
+       ctx, err := nncp.CtxFromCmdline(
+               *cfgPath,
+               *spoolPath,
+               *logPath,
+               *quiet,
+               *showPrgrs,
+               *omitPrgrs,
+               *debug,
+       )
        if err != nil {
                log.Fatalln("Error during initialization:", err)
        }
@@ -142,7 +152,14 @@ func main() {
                                }); err != nil {
                                        log.Fatalln("Error writing tar header:", err)
                                }
-                               if _, err = io.Copy(tarWr, job.Fd); err != nil {
+                               if _, err = nncp.CopyProgressed(
+                                       tarWr, job.Fd,
+                                       nncp.SdsAdd(sds, nncp.SDS{
+                                               "pkt":      nncp.ToBase32(job.HshValue[:]),
+                                               "fullsize": job.Size,
+                                       }),
+                                       ctx.ShowPrgrs,
+                               ); err != nil {
                                        log.Fatalln("Error during copying to tar:", err)
                                }
                                job.Fd.Close()
@@ -157,9 +174,7 @@ func main() {
                                                log.Fatalln("Error during deletion:", err)
                                        }
                                }
-                               ctx.LogI("nncp-bundle", nncp.SdsAdd(sds, nncp.SDS{
-                                       "size": strconv.FormatInt(job.Size, 10),
-                               }), "")
+                               ctx.LogI("nncp-bundle", nncp.SdsAdd(sds, nncp.SDS{"size": job.Size}), "")
                        }
                }
                if err = tarWr.Close(); err != nil {
@@ -224,9 +239,13 @@ func main() {
                                ctx.LogD("nncp-bundle", sds, "Too small packet")
                                continue
                        }
+                       if !ctx.IsEnoughSpace(entry.Size) {
+                               ctx.LogE("nncp-bundle", sds, errors.New("not enough spool space"), "")
+                               continue
+                       }
                        pktName = filepath.Base(entry.Name)
                        if _, err = nncp.FromBase32(pktName); err != nil {
-                               ctx.LogD("nncp-bundle", sds, "Bad packet name")
+                               ctx.LogD("nncp-bundle", nncp.SdsAdd(sds, nncp.SDS{"err": "bad packet name"}), "")
                                continue
                        }
                        if _, err = io.ReadFull(tarR, pktEncBuf); err != nil {
@@ -273,7 +292,11 @@ func main() {
                                if _, err = hsh.Write(pktEncBuf); err != nil {
                                        log.Fatalln("Error during writing:", err)
                                }
-                               if _, err = io.Copy(hsh, tarR); err != nil {
+                               if _, err = nncp.CopyProgressed(
+                                       hsh, tarR,
+                                       nncp.SdsAdd(sds, nncp.SDS{"fullsize": entry.Size}),
+                                       ctx.ShowPrgrs,
+                               ); err != nil {
                                        log.Fatalln("Error during copying:", err)
                                }
                                if nncp.ToBase32(hsh.Sum(nil)) == pktName {
@@ -282,7 +305,7 @@ func main() {
                                                os.Remove(dstPath)
                                        }
                                } else {
-                                       ctx.LogE("nncp-bundle", sds, "bad checksum")
+                                       ctx.LogE("nncp-bundle", sds, errors.New("bad checksum"), "")
                                }
                                continue
                        }
@@ -298,6 +321,7 @@ func main() {
                        }
                        sds["node"] = nncp.ToBase32(pktEnc.Recipient[:])
                        sds["pkt"] = pktName
+                       sds["fullsize"] = entry.Size
                        selfPath = filepath.Join(ctx.Spool, ctx.SelfId.String(), string(nncp.TRx))
                        dstPath = filepath.Join(selfPath, pktName)
                        if _, err = os.Stat(dstPath); err == nil || !os.IsNotExist(err) {
@@ -317,11 +341,11 @@ func main() {
                                        if _, err = hsh.Write(pktEncBuf); err != nil {
                                                log.Fatalln("Error during writing:", err)
                                        }
-                                       if _, err = io.Copy(hsh, tarR); err != nil {
+                                       if _, err = nncp.CopyProgressed(hsh, tarR, sds, ctx.ShowPrgrs); err != nil {
                                                log.Fatalln("Error during copying:", err)
                                        }
                                        if nncp.ToBase32(hsh.Sum(nil)) != pktName {
-                                               ctx.LogE("nncp-bundle", sds, "bad checksum")
+                                               ctx.LogE("nncp-bundle", sds, errors.New("bad checksum"), "")
                                                continue
                                        }
                                } else {
@@ -332,7 +356,7 @@ func main() {
                                        if _, err = tmp.W.Write(pktEncBuf); err != nil {
                                                log.Fatalln("Error during writing:", err)
                                        }
-                                       if _, err = io.Copy(tmp.W, tarR); err != nil {
+                                       if _, err = nncp.CopyProgressed(tmp.W, tarR, sds, ctx.ShowPrgrs); err != nil {
                                                log.Fatalln("Error during copying:", err)
                                        }
                                        if err = tmp.W.Flush(); err != nil {
@@ -343,14 +367,14 @@ func main() {
                                                        log.Fatalln("Error during commiting:", err)
                                                }
                                        } else {
-                                               ctx.LogE("nncp-bundle", sds, "bad checksum")
+                                               ctx.LogE("nncp-bundle", sds, errors.New("bad checksum"), "")
                                                tmp.Cancel()
                                                continue
                                        }
                                }
                        } else {
                                if *dryRun {
-                                       if _, err = io.Copy(ioutil.Discard, tarR); err != nil {
+                                       if _, err = nncp.CopyProgressed(ioutil.Discard, tarR, sds, ctx.ShowPrgrs); err != nil {
                                                log.Fatalln("Error during copying:", err)
                                        }
                                } else {
@@ -362,7 +386,7 @@ func main() {
                                        if _, err = bufTmp.Write(pktEncBuf); err != nil {
                                                log.Fatalln("Error during writing:", err)
                                        }
-                                       if _, err = io.Copy(bufTmp, tarR); err != nil {
+                                       if _, err = nncp.CopyProgressed(bufTmp, tarR, sds, ctx.ShowPrgrs); err != nil {
                                                log.Fatalln("Error during copying:", err)
                                        }
                                        if err = bufTmp.Flush(); err != nil {
@@ -384,7 +408,7 @@ func main() {
                                }
                        }
                        ctx.LogI("nncp-bundle", nncp.SdsAdd(sds, nncp.SDS{
-                               "size": strconv.FormatInt(entry.Size, 10),
+                               "size": sds["fullsize"],
                        }), "")
                }
        }
index dde658ff3a7b938b9037e2d6403b9c2814a6cbab..0072e982e22345fe250e49edb0e0dfdfca2644e1 100644 (file)
@@ -49,6 +49,8 @@ func main() {
                spoolPath   = flag.String("spool", "", "Override path to spool")
                logPath     = flag.String("log", "", "Override path to logfile")
                quiet       = flag.Bool("quiet", false, "Print only errors")
+               showPrgrs   = flag.Bool("progress", false, "Force progress showing")
+               omitPrgrs   = flag.Bool("noprogress", false, "Omit progress showing")
                debug       = flag.Bool("debug", false, "Print debug messages")
                version     = flag.Bool("version", false, "Print version information")
                warranty    = flag.Bool("warranty", false, "Print warranty information")
@@ -78,7 +80,15 @@ func main() {
                log.Fatalln("-rx and -tx can not be set simultaneously")
        }
 
-       ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, *logPath, *quiet, *debug)
+       ctx, err := nncp.CtxFromCmdline(
+               *cfgPath,
+               *spoolPath,
+               *logPath,
+               *quiet,
+               *showPrgrs,
+               *omitPrgrs,
+               *debug,
+       )
        if err != nil {
                log.Fatalln("Error during initialization:", err)
        }
index e97ea8555f4e6d91dda10d5500f529cde4c59fdc..7297350d41b746efa23eb5380726eac15c19e2a9 100644 (file)
@@ -19,11 +19,11 @@ along with this program.  If not, see <http://www.gnu.org/licenses/>.
 package main
 
 import (
+       "errors"
        "flag"
        "fmt"
        "log"
        "os"
-       "strconv"
        "sync"
        "time"
 
@@ -44,6 +44,8 @@ func main() {
                spoolPath = flag.String("spool", "", "Override path to spool")
                logPath   = flag.String("log", "", "Override path to logfile")
                quiet     = flag.Bool("quiet", false, "Print only errors")
+               showPrgrs = flag.Bool("progress", false, "Force progress showing")
+               omitPrgrs = flag.Bool("noprogress", false, "Omit progress showing")
                debug     = flag.Bool("debug", false, "Print debug messages")
                version   = flag.Bool("version", false, "Print version information")
                warranty  = flag.Bool("warranty", false, "Print warranty information")
@@ -59,7 +61,15 @@ func main() {
                return
        }
 
-       ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, *logPath, *quiet, *debug)
+       ctx, err := nncp.CtxFromCmdline(
+               *cfgPath,
+               *spoolPath,
+               *logPath,
+               *quiet,
+               *showPrgrs,
+               *omitPrgrs,
+               *debug,
+       )
        if err != nil {
                log.Fatalln("Error during initialization:", err)
        }
@@ -105,13 +115,13 @@ func main() {
                                } else {
                                        addrs = append(addrs, *call.Addr)
                                }
-                               sds := nncp.SDS{"node": node.Id, "callindex": strconv.Itoa(i)}
+                               sds := nncp.SDS{"node": node.Id, "callindex": i}
                                for {
                                        n := time.Now()
                                        t := call.Cron.Next(n)
                                        ctx.LogD("caller", sds, t.String())
                                        if t.IsZero() {
-                                               ctx.LogE("caller", sds, "got zero time")
+                                               ctx.LogE("caller", sds, errors.New("got zero time"), "")
                                                return
                                        }
                                        time.Sleep(t.Sub(n))
index 7214079040daacfb0ad7f85c190130caa60ddedb..43d5b3ef128d44fb9edf825067877a8faa956595 100644 (file)
@@ -52,7 +52,7 @@ func main() {
                return
        }
 
-       ctx, err := nncp.CtxFromCmdline(*cfgPath, "", "", false, false)
+       ctx, err := nncp.CtxFromCmdline(*cfgPath, "", "", false, false, false, false)
        if err != nil {
                log.Fatalln("Error during initialization:", err)
        }
index 70bd1dcd4d190fb5bea47d8a59312748cb2fd372..6f43b69dff9a31e54ab958337412e2d65e8e2443 100644 (file)
@@ -102,6 +102,8 @@ func main() {
   log: %s
   # Enforce specified umask usage
   # umask: "022"
+  # Omit progress showing by default
+  # noprogress: true
 
   # Enable notification email sending
   # notify: {
index 186357ee3d458698d8993aa4fb664e34df6741db..77fae4a87a515ff0cb1fd7718e739e235da389c6 100644 (file)
@@ -41,6 +41,8 @@ func main() {
                spoolPath = flag.String("spool", "", "Override path to spool")
                logPath   = flag.String("log", "", "Override path to logfile")
                quiet     = flag.Bool("quiet", false, "Print only errors")
+               showPrgrs = flag.Bool("progress", false, "Force progress showing")
+               omitPrgrs = flag.Bool("noprogress", false, "Omit progress showing")
                debug     = flag.Bool("debug", false, "Print debug messages")
                version   = flag.Bool("version", false, "Print version information")
                warranty  = flag.Bool("warranty", false, "Print warranty information")
@@ -56,7 +58,15 @@ func main() {
                return
        }
 
-       ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, *logPath, *quiet, *debug)
+       ctx, err := nncp.CtxFromCmdline(
+               *cfgPath,
+               *spoolPath,
+               *logPath,
+               *quiet,
+               *showPrgrs,
+               *omitPrgrs,
+               *debug,
+       )
        if err != nil {
                log.Fatalln("Error during initialization:", err)
        }
index 897dc06f1d181f8c72614ad9279c08884158ee2b..3fdba332684f1e0e0cdfecd5128ff60049256ae9 100644 (file)
@@ -24,7 +24,6 @@ import (
        "log"
        "net"
        "os"
-       "strconv"
        "time"
 
        "go.cypherpunks.ru/nncp/v5"
@@ -73,18 +72,18 @@ func performSP(ctx *nncp.Ctx, conn nncp.ConnDeadlined, nice uint8) {
                state.Wait()
                ctx.LogI("call-finish", nncp.SDS{
                        "node":     state.Node.Id,
-                       "duration": strconv.FormatInt(int64(state.Duration.Seconds()), 10),
-                       "rxbytes":  strconv.FormatInt(state.RxBytes, 10),
-                       "txbytes":  strconv.FormatInt(state.TxBytes, 10),
-                       "rxspeed":  strconv.FormatInt(state.RxSpeed, 10),
-                       "txspeed":  strconv.FormatInt(state.TxSpeed, 10),
+                       "duration": state.Duration.Seconds(),
+                       "rxbytes":  state.RxBytes,
+                       "txbytes":  state.TxBytes,
+                       "rxspeed":  state.RxSpeed,
+                       "txspeed":  state.TxSpeed,
                }, "")
        } else {
                nodeId := "unknown"
                if state.Node != nil {
                        nodeId = state.Node.Id.String()
                }
-               ctx.LogE("call-start", nncp.SDS{"node": nodeId, "err": err}, "")
+               ctx.LogE("call-start", nncp.SDS{"node": nodeId}, err, "")
        }
 }
 
@@ -98,6 +97,8 @@ func main() {
                spoolPath = flag.String("spool", "", "Override path to spool")
                logPath   = flag.String("log", "", "Override path to logfile")
                quiet     = flag.Bool("quiet", false, "Print only errors")
+               showPrgrs = flag.Bool("progress", false, "Force progress showing")
+               omitPrgrs = flag.Bool("noprogress", false, "Omit progress showing")
                debug     = flag.Bool("debug", false, "Print debug messages")
                version   = flag.Bool("version", false, "Print version information")
                warranty  = flag.Bool("warranty", false, "Print warranty information")
@@ -117,7 +118,15 @@ func main() {
                log.Fatalln(err)
        }
 
-       ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, *logPath, *quiet, *debug)
+       ctx, err := nncp.CtxFromCmdline(
+               *cfgPath,
+               *spoolPath,
+               *logPath,
+               *quiet,
+               *showPrgrs,
+               *omitPrgrs,
+               *debug,
+       )
        if err != nil {
                log.Fatalln("Error during initialization:", err)
        }
index 54394e8863e99c786d5e76d2ae35023713f4d447..805577e77373ffc2689e4d63bc5bde222d8a9f43 100644 (file)
@@ -45,6 +45,8 @@ func main() {
                spoolPath    = flag.String("spool", "", "Override path to spool")
                logPath      = flag.String("log", "", "Override path to logfile")
                quiet        = flag.Bool("quiet", false, "Print only errors")
+               showPrgrs    = flag.Bool("progress", false, "Force progress showing")
+               omitPrgrs    = flag.Bool("noprogress", false, "Omit progress showing")
                debug        = flag.Bool("debug", false, "Print debug messages")
                version      = flag.Bool("version", false, "Print version information")
                warranty     = flag.Bool("warranty", false, "Print warranty information")
@@ -72,7 +74,15 @@ func main() {
                log.Fatalln(err)
        }
 
-       ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, *logPath, *quiet, *debug)
+       ctx, err := nncp.CtxFromCmdline(
+               *cfgPath,
+               *spoolPath,
+               *logPath,
+               *quiet,
+               *showPrgrs,
+               *omitPrgrs,
+               *debug,
+       )
        if err != nil {
                log.Fatalln("Error during initialization:", err)
        }
index 3257f2b71a39cb9ac38754896989b071910b5e77..9d975f7cfc3bca1f872f0da421fda426d0d63731 100644 (file)
@@ -51,6 +51,8 @@ func main() {
                spoolPath    = flag.String("spool", "", "Override path to spool")
                logPath      = flag.String("log", "", "Override path to logfile")
                quiet        = flag.Bool("quiet", false, "Print only errors")
+               showPrgrs    = flag.Bool("progress", false, "Force progress showing")
+               omitPrgrs    = flag.Bool("noprogress", false, "Omit progress showing")
                debug        = flag.Bool("debug", false, "Print debug messages")
                version      = flag.Bool("version", false, "Print version information")
                warranty     = flag.Bool("warranty", false, "Print warranty information")
@@ -74,7 +76,15 @@ func main() {
                log.Fatalln(err)
        }
 
-       ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, *logPath, *quiet, *debug)
+       ctx, err := nncp.CtxFromCmdline(
+               *cfgPath,
+               *spoolPath,
+               *logPath,
+               *quiet,
+               *showPrgrs,
+               *omitPrgrs,
+               *debug,
+       )
        if err != nil {
                log.Fatalln("Error during initialization:", err)
        }
index 25328875426a525d23035044449ccab3b784d286..62a851afd260b47867c209f0ab04abd5e751c39d 100644 (file)
@@ -46,6 +46,8 @@ func main() {
                spoolPath    = flag.String("spool", "", "Override path to spool")
                logPath      = flag.String("log", "", "Override path to logfile")
                quiet        = flag.Bool("quiet", false, "Print only errors")
+               showPrgrs    = flag.Bool("progress", false, "Force progress showing")
+               omitPrgrs    = flag.Bool("noprogress", false, "Omit progress showing")
                debug        = flag.Bool("debug", false, "Print debug messages")
                version      = flag.Bool("version", false, "Print version information")
                warranty     = flag.Bool("warranty", false, "Print warranty information")
@@ -73,7 +75,15 @@ func main() {
                log.Fatalln(err)
        }
 
-       ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, *logPath, *quiet, *debug)
+       ctx, err := nncp.CtxFromCmdline(
+               *cfgPath,
+               *spoolPath,
+               *logPath,
+               *quiet,
+               *showPrgrs,
+               *omitPrgrs,
+               *debug,
+       )
        if err != nil {
                log.Fatalln("Error during initialization:", err)
        }
index 97ca259e80fb907715f5a807e32f1d4d6fe77fe4..883d06f416aa4ebfaedc2a83fcbaa75035258787 100644 (file)
@@ -54,7 +54,7 @@ func main() {
                return
        }
 
-       ctx, err := nncp.CtxFromCmdline(*cfgPath, "", *logPath, false, *debug)
+       ctx, err := nncp.CtxFromCmdline(*cfgPath, "", *logPath, false, false, false, *debug)
        if err != nil {
                log.Fatalln("Error during initialization:", err)
        }
index cb9042a5c53d28809df08d089e5d00c5991f2e9d..aaf0c2dd90c76337818314f4dd192e5cb1c877c1 100644 (file)
@@ -132,7 +132,7 @@ func main() {
        _, err = xdr.Unmarshal(bytes.NewReader(beginning), &pktEnc)
        if err == nil && pktEnc.Magic == nncp.MagicNNCPEv4 {
                if *dump {
-                       ctx, err := nncp.CtxFromCmdline(*cfgPath, "", "", false, false)
+                       ctx, err := nncp.CtxFromCmdline(*cfgPath, "", "", false, false, false, false)
                        if err != nil {
                                log.Fatalln("Error during initialization:", err)
                        }
index 161e86f602ad53961ac2167d503b121f71e0960a..75aaf5a0aa06b036373e8aff2b10c9efc45b41ab 100644 (file)
@@ -22,6 +22,7 @@ import (
        "bufio"
        "bytes"
        "encoding/hex"
+       "errors"
        "flag"
        "fmt"
        "hash"
@@ -57,21 +58,18 @@ func process(ctx *nncp.Ctx, path string, keep, dryRun, stdout, dumpMeta bool) bo
        }
        var metaPkt nncp.ChunkedMeta
        if _, err = xdr.Unmarshal(fd, &metaPkt); err != nil {
-               ctx.LogE("nncp-reass", nncp.SDS{"path": path, "err": err}, "bad meta file")
+               ctx.LogE("nncp-reass", nncp.SDS{"path": path}, err, "bad meta file")
                return false
        }
        fd.Close()
        if metaPkt.Magic != nncp.MagicNNCPMv1 {
-               ctx.LogE("nncp-reass", nncp.SDS{"path": path, "err": nncp.BadMagic}, "")
+               ctx.LogE("nncp-reass", nncp.SDS{"path": path}, nncp.BadMagic, "")
                return false
        }
 
        metaName := filepath.Base(path)
        if !strings.HasSuffix(metaName, nncp.ChunkedSuffixMeta) {
-               ctx.LogE("nncp-reass", nncp.SDS{
-                       "path": path,
-                       "err":  "invalid filename suffix",
-               }, "")
+               ctx.LogE("nncp-reass", nncp.SDS{"path": path}, errors.New("invalid filename suffix"), "")
                return false
        }
        mainName := strings.TrimSuffix(metaName, nncp.ChunkedSuffixMeta)
@@ -108,10 +106,7 @@ func process(ctx *nncp.Ctx, path string, keep, dryRun, stdout, dumpMeta bool) bo
        for chunkNum, chunkPath := range chunksPaths {
                fi, err := os.Stat(chunkPath)
                if err != nil && os.IsNotExist(err) {
-                       ctx.LogI("nncp-reass", nncp.SDS{
-                               "path":  path,
-                               "chunk": strconv.Itoa(chunkNum),
-                       }, "missing")
+                       ctx.LogI("nncp-reass", nncp.SDS{"path": path, "chunk": chunkNum}, "missing")
                        allChunksExist = false
                        continue
                }
@@ -122,10 +117,11 @@ func process(ctx *nncp.Ctx, path string, keep, dryRun, stdout, dumpMeta bool) bo
                        badSize = uint64(fi.Size()) != metaPkt.ChunkSize
                }
                if badSize {
-                       ctx.LogE("nncp-reass", nncp.SDS{
-                               "path":  path,
-                               "chunk": strconv.Itoa(chunkNum),
-                       }, "invalid size")
+                       ctx.LogE(
+                               "nncp-reass",
+                               nncp.SDS{"path": path, "chunk": chunkNum},
+                               errors.New("invalid size"), "",
+                       )
                        allChunksExist = false
                }
        }
@@ -140,19 +136,27 @@ func process(ctx *nncp.Ctx, path string, keep, dryRun, stdout, dumpMeta bool) bo
                if err != nil {
                        log.Fatalln("Can not open file:", err)
                }
+               fi, err := fd.Stat()
+               if err != nil {
+                       log.Fatalln("Can not stat file:", err)
+               }
                hsh, err = blake2b.New256(nil)
                if err != nil {
                        log.Fatalln(err)
                }
-               if _, err = io.Copy(hsh, bufio.NewReader(fd)); err != nil {
+               if _, err = nncp.CopyProgressed(hsh, bufio.NewReader(fd), nncp.SDS{
+                       "pkt":      chunkPath,
+                       "fullsize": fi.Size(),
+               }, ctx.ShowPrgrs); err != nil {
                        log.Fatalln(err)
                }
                fd.Close()
                if bytes.Compare(hsh.Sum(nil), metaPkt.Checksums[chunkNum][:]) != 0 {
-                       ctx.LogE("nncp-reass", nncp.SDS{
-                               "path":  path,
-                               "chunk": strconv.Itoa(chunkNum),
-                       }, "checksum is bad")
+                       ctx.LogE(
+                               "nncp-reass",
+                               nncp.SDS{"path": path, "chunk": chunkNum},
+                               errors.New("checksum is bad"), "",
+                       )
                        allChecksumsGood = false
                }
        }
@@ -187,16 +191,20 @@ func process(ctx *nncp.Ctx, path string, keep, dryRun, stdout, dumpMeta bool) bo
                if err != nil {
                        log.Fatalln("Can not open file:", err)
                }
-               if _, err = io.Copy(dstW, bufio.NewReader(fd)); err != nil {
+               fi, err := fd.Stat()
+               if err != nil {
+                       log.Fatalln("Can not stat file:", err)
+               }
+               if _, err = nncp.CopyProgressed(dstW, bufio.NewReader(fd), nncp.SDS{
+                       "pkt":      chunkPath,
+                       "fullsize": fi.Size(),
+               }, ctx.ShowPrgrs); err != nil {
                        log.Fatalln(err)
                }
                fd.Close()
                if !keep {
                        if err = os.Remove(chunkPath); err != nil {
-                               ctx.LogE("nncp-reass", nncp.SdsAdd(sds, nncp.SDS{
-                                       "chunk": strconv.Itoa(chunkNum),
-                                       "err":   err,
-                               }), "")
+                               ctx.LogE("nncp-reass", nncp.SdsAdd(sds, nncp.SDS{"chunk": chunkNum}), err, "")
                                hasErrors = true
                        }
                }
@@ -213,7 +221,7 @@ func process(ctx *nncp.Ctx, path string, keep, dryRun, stdout, dumpMeta bool) bo
        ctx.LogD("nncp-reass", sds, "written")
        if !keep {
                if err = os.Remove(path); err != nil {
-                       ctx.LogE("nncp-reass", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "")
+                       ctx.LogE("nncp-reass", sds, err, "")
                        hasErrors = true
                }
        }
@@ -249,13 +257,13 @@ func findMetas(ctx *nncp.Ctx, dirPath string) []string {
        dir, err := os.Open(dirPath)
        defer dir.Close()
        if err != nil {
-               ctx.LogE("nncp-reass", nncp.SDS{"path": dirPath, "err": err}, "")
+               ctx.LogE("nncp-reass", nncp.SDS{"path": dirPath}, err, "")
                return nil
        }
        fis, err := dir.Readdir(0)
        dir.Close()
        if err != nil {
-               ctx.LogE("nncp-reass", nncp.SDS{"path": dirPath, "err": err}, "")
+               ctx.LogE("nncp-reass", nncp.SDS{"path": dirPath}, err, "")
                return nil
        }
        metaPaths := make([]string, 0)
@@ -279,6 +287,8 @@ func main() {
                spoolPath = flag.String("spool", "", "Override path to spool")
                logPath   = flag.String("log", "", "Override path to logfile")
                quiet     = flag.Bool("quiet", false, "Print only errors")
+               showPrgrs = flag.Bool("progress", false, "Force progress showing")
+               omitPrgrs = flag.Bool("noprogress", false, "Omit progress showing")
                debug     = flag.Bool("debug", false, "Print debug messages")
                version   = flag.Bool("version", false, "Print version information")
                warranty  = flag.Bool("warranty", false, "Print warranty information")
@@ -294,7 +304,15 @@ func main() {
                return
        }
 
-       ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, *logPath, *quiet, *debug)
+       ctx, err := nncp.CtxFromCmdline(
+               *cfgPath,
+               *spoolPath,
+               *logPath,
+               *quiet,
+               *showPrgrs,
+               *omitPrgrs,
+               *debug,
+       )
        if err != nil {
                log.Fatalln("Error during initialization:", err)
        }
index 765ed4942660655fe8c252cafcd0bbe3548446e8..31c04921881c0e7c739877e19b0c2b2a5a15d73c 100644 (file)
@@ -70,23 +70,25 @@ func main() {
                return
        }
 
-       ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, "", *quiet, *debug)
+       ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, "", *quiet, false, false, *debug)
        if err != nil {
                log.Fatalln("Error during initialization:", err)
        }
        ctx.Umask()
 
        if *doTmp {
-               err = filepath.Walk(filepath.Join(ctx.Spool, "tmp"), func(path string, info os.FileInfo, err error) error {
-                       if err != nil {
-                               return err
-                       }
-                       if info.IsDir() {
-                               return nil
-                       }
-                       ctx.LogI("nncp-rm", nncp.SDS{"file": path}, "")
-                       return os.Remove(path)
-               })
+               err = filepath.Walk(
+                       filepath.Join(ctx.Spool, "tmp"),
+                       func(path string, info os.FileInfo, err error) error {
+                               if err != nil {
+                                       return err
+                               }
+                               if info.IsDir() {
+                                       return nil
+                               }
+                               ctx.LogI("nncp-rm", nncp.SDS{"file": path}, "")
+                               return os.Remove(path)
+                       })
                if err != nil {
                        log.Fatalln("Error during walking:", err)
                }
@@ -120,34 +122,36 @@ func main() {
                log.Fatalln("Invalid -node specified:", err)
        }
        remove := func(xx nncp.TRxTx) error {
-               return filepath.Walk(filepath.Join(ctx.Spool, node.Id.String(), string(xx)), func(path string, info os.FileInfo, err error) error {
-                       if err != nil {
-                               return err
-                       }
-                       if info.IsDir() {
+               return filepath.Walk(
+                       filepath.Join(ctx.Spool, node.Id.String(), string(xx)),
+                       func(path string, info os.FileInfo, err error) error {
+                               if err != nil {
+                                       return err
+                               }
+                               if info.IsDir() {
+                                       return nil
+                               }
+                               if *doSeen && strings.HasSuffix(info.Name(), nncp.SeenSuffix) {
+                                       ctx.LogI("nncp-rm", nncp.SDS{"file": path}, "")
+                                       return os.Remove(path)
+                               }
+                               if *doPart && strings.HasSuffix(info.Name(), nncp.PartSuffix) {
+                                       ctx.LogI("nncp-rm", nncp.SDS{"file": path}, "")
+                                       return os.Remove(path)
+                               }
+                               if *pktRaw != "" && filepath.Base(info.Name()) == *pktRaw {
+                                       ctx.LogI("nncp-rm", nncp.SDS{"file": path}, "")
+                                       return os.Remove(path)
+                               }
+                               if !*doSeen &&
+                                       !*doPart &&
+                                       (*doRx || *doTx) &&
+                                       ((*doRx && xx == nncp.TRx) || (*doTx && xx == nncp.TTx)) {
+                                       ctx.LogI("nncp-rm", nncp.SDS{"file": path}, "")
+                                       return os.Remove(path)
+                               }
                                return nil
-                       }
-                       if *doSeen && strings.HasSuffix(info.Name(), nncp.SeenSuffix) {
-                               ctx.LogI("nncp-rm", nncp.SDS{"file": path}, "")
-                               return os.Remove(path)
-                       }
-                       if *doPart && strings.HasSuffix(info.Name(), nncp.PartSuffix) {
-                               ctx.LogI("nncp-rm", nncp.SDS{"file": path}, "")
-                               return os.Remove(path)
-                       }
-                       if *pktRaw != "" && filepath.Base(info.Name()) == *pktRaw {
-                               ctx.LogI("nncp-rm", nncp.SDS{"file": path}, "")
-                               return os.Remove(path)
-                       }
-                       if !*doSeen &&
-                               !*doPart &&
-                               (*doRx || *doTx) &&
-                               ((*doRx && xx == nncp.TRx) || (*doTx && xx == nncp.TTx)) {
-                               ctx.LogI("nncp-rm", nncp.SDS{"file": path}, "")
-                               return os.Remove(path)
-                       }
-                       return nil
-               })
+                       })
        }
        if *pktRaw != "" || *doRx || *doSeen || *doPart {
                if err = remove(nncp.TRx); err != nil {
index c03745a5241e9361ae83626386af2e9dbb067f35..f0588922d4ff5a351f339ec564ff6e670fc5e7fd 100644 (file)
@@ -56,7 +56,7 @@ func main() {
                return
        }
 
-       ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, "", false, *debug)
+       ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, "", false, false, false, *debug)
        if err != nil {
                log.Fatalln("Error during initialization:", err)
        }
index 3f31ff9fb41d27adcff5d930e224e42a83b909e6..5cf0a933496fc546ba679dec8314917033253134 100644 (file)
@@ -50,6 +50,8 @@ func main() {
                spoolPath = flag.String("spool", "", "Override path to spool")
                logPath   = flag.String("log", "", "Override path to logfile")
                quiet     = flag.Bool("quiet", false, "Print only errors")
+               showPrgrs = flag.Bool("progress", false, "Force progress showing")
+               omitPrgrs = flag.Bool("noprogress", false, "Omit progress showing")
                debug     = flag.Bool("debug", false, "Print debug messages")
                version   = flag.Bool("version", false, "Print version information")
                warranty  = flag.Bool("warranty", false, "Print warranty information")
@@ -69,7 +71,15 @@ func main() {
                log.Fatalln(err)
        }
 
-       ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, *logPath, *quiet, *debug)
+       ctx, err := nncp.CtxFromCmdline(
+               *cfgPath,
+               *spoolPath,
+               *logPath,
+               *quiet,
+               *showPrgrs,
+               *omitPrgrs,
+               *debug,
+       )
        if err != nil {
                log.Fatalln("Error during initialization:", err)
        }
index c784b4e6bfade94a0105447ac95a595133f154d2..df7acd3b9cc3f0b2b881f4f85073ff51ec40e2b7 100644 (file)
@@ -20,13 +20,13 @@ package main
 
 import (
        "bufio"
+       "errors"
        "flag"
        "fmt"
        "io"
        "log"
        "os"
        "path/filepath"
-       "strconv"
 
        xdr "github.com/davecgh/go-xdr/xdr2"
        "go.cypherpunks.ru/nncp/v5"
@@ -51,6 +51,8 @@ func main() {
                spoolPath = flag.String("spool", "", "Override path to spool")
                logPath   = flag.String("log", "", "Override path to logfile")
                quiet     = flag.Bool("quiet", false, "Print only errors")
+               showPrgrs = flag.Bool("progress", false, "Force progress showing")
+               omitPrgrs = flag.Bool("noprogress", false, "Omit progress showing")
                debug     = flag.Bool("debug", false, "Print debug messages")
                version   = flag.Bool("version", false, "Print version information")
                warranty  = flag.Bool("warranty", false, "Print warranty information")
@@ -77,7 +79,15 @@ func main() {
                log.Fatalln("-rx and -tx can not be set simultaneously")
        }
 
-       ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, *logPath, *quiet, *debug)
+       ctx, err := nncp.CtxFromCmdline(
+               *cfgPath,
+               *spoolPath,
+               *logPath,
+               *quiet,
+               *showPrgrs,
+               *omitPrgrs,
+               *debug,
+       )
        if err != nil {
                log.Fatalln("Error during initialization:", err)
        }
@@ -107,20 +117,20 @@ func main() {
                        ctx.LogD("nncp-xfer", sds, "no dir")
                        goto Tx
                }
-               ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "stat")
+               ctx.LogE("nncp-xfer", sds, err, "stat")
                isBad = true
                goto Tx
        }
        dir, err = os.Open(selfPath)
        if err != nil {
-               ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "open")
+               ctx.LogE("nncp-xfer", sds, err, "open")
                isBad = true
                goto Tx
        }
        fis, err = dir.Readdir(0)
        dir.Close()
        if err != nil {
-               ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "read")
+               ctx.LogE("nncp-xfer", sds, err, "read")
                isBad = true
                goto Tx
        }
@@ -144,14 +154,14 @@ func main() {
                }
                dir, err = os.Open(filepath.Join(selfPath, fi.Name()))
                if err != nil {
-                       ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "open")
+                       ctx.LogE("nncp-xfer", sds, err, "open")
                        isBad = true
                        continue
                }
                fisInt, err := dir.Readdir(0)
                dir.Close()
                if err != nil {
-                       ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "read")
+                       ctx.LogE("nncp-xfer", sds, err, "read")
                        isBad = true
                        continue
                }
@@ -159,12 +169,16 @@ func main() {
                        if !fi.IsDir() {
                                continue
                        }
+                       // Check that it is valid Base32 encoding
+                       if _, err = nncp.NodeIdFromString(fiInt.Name()); err != nil {
+                               continue
+                       }
                        filename := filepath.Join(dir.Name(), fiInt.Name())
                        sds["file"] = filename
                        delete(sds, "size")
                        fd, err := os.Open(filename)
                        if err != nil {
-                               ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "open")
+                               ctx.LogE("nncp-xfer", sds, err, "open")
                                isBad = true
                                continue
                        }
@@ -180,9 +194,9 @@ func main() {
                                fd.Close()
                                continue
                        }
-                       sds["size"] = strconv.FormatInt(fiInt.Size(), 10)
+                       sds["size"] = fiInt.Size()
                        if !ctx.IsEnoughSpace(fiInt.Size()) {
-                               ctx.LogE("nncp-xfer", sds, "is not enough space")
+                               ctx.LogE("nncp-xfer", sds, errors.New("is not enough space"), "")
                                fd.Close()
                                continue
                        }
@@ -191,14 +205,28 @@ func main() {
                        if err != nil {
                                log.Fatalln(err)
                        }
-                       if _, err = io.CopyN(tmp.W, bufio.NewReader(fd), fiInt.Size()); err != nil {
-                               ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "copy")
+                       r, w := io.Pipe()
+                       go func() {
+                               _, err := io.CopyN(w, bufio.NewReader(fd), fiInt.Size())
+                               if err == nil {
+                                       w.Close()
+                                       return
+                               }
+                               ctx.LogE("nncp-xfer", sds, err, "copy")
+                               w.CloseWithError(err)
+                       }()
+                       if _, err = nncp.CopyProgressed(tmp.W, r, nncp.SdsAdd(sds, nncp.SDS{
+                               "pkt":      filename,
+                               "fullsize": sds["size"],
+                       }), ctx.ShowPrgrs); err != nil {
+                               ctx.LogE("nncp-xfer", sds, err, "copy")
                                isBad = true
-                               fd.Close()
+                       }
+                       fd.Close()
+                       if isBad {
                                tmp.Cancel()
                                continue
                        }
-                       fd.Close()
                        if err = tmp.Commit(filepath.Join(
                                ctx.Spool,
                                nodeId.String(),
@@ -209,7 +237,7 @@ func main() {
                        ctx.LogI("nncp-xfer", sds, "")
                        if !*keep {
                                if err = os.Remove(filename); err != nil {
-                                       ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "remove")
+                                       ctx.LogE("nncp-xfer", sds, err, "remove")
                                        isBad = true
                                }
                        }
@@ -246,13 +274,13 @@ Tx:
                                }
                                if err = os.Mkdir(nodePath, os.FileMode(0777)); err != nil {
                                        ctx.UnlockDir(dirLock)
-                                       ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "mkdir")
+                                       ctx.LogE("nncp-xfer", sds, err, "mkdir")
                                        isBad = true
                                        continue
                                }
                        } else {
                                ctx.UnlockDir(dirLock)
-                               ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "stat")
+                               ctx.LogE("nncp-xfer", sds, err, "stat")
                                isBad = true
                                continue
                        }
@@ -264,13 +292,13 @@ Tx:
                        if os.IsNotExist(err) {
                                if err = os.Mkdir(dstPath, os.FileMode(0777)); err != nil {
                                        ctx.UnlockDir(dirLock)
-                                       ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "mkdir")
+                                       ctx.LogE("nncp-xfer", sds, err, "mkdir")
                                        isBad = true
                                        continue
                                }
                        } else {
                                ctx.UnlockDir(dirLock)
-                               ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "stat")
+                               ctx.LogE("nncp-xfer", sds, err, "stat")
                                isBad = true
                                continue
                        }
@@ -296,7 +324,7 @@ Tx:
                        }
                        tmp, err := nncp.TempFile(dstPath, "xfer")
                        if err != nil {
-                               ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "mktemp")
+                               ctx.LogE("nncp-xfer", sds, err, "mktemp")
                                job.Fd.Close()
                                isBad = true
                                break
@@ -304,45 +332,48 @@ Tx:
                        sds["tmp"] = tmp.Name()
                        ctx.LogD("nncp-xfer", sds, "created")
                        bufW := bufio.NewWriter(tmp)
-                       copied, err := io.Copy(bufW, bufio.NewReader(job.Fd))
+                       copied, err := nncp.CopyProgressed(
+                               bufW,
+                               bufio.NewReader(job.Fd),
+                               nncp.SdsAdd(sds, nncp.SDS{"fullsize": job.Size}),
+                               ctx.ShowPrgrs,
+                       )
                        job.Fd.Close()
                        if err != nil {
-                               ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "copy")
+                               ctx.LogE("nncp-xfer", sds, err, "copy")
                                tmp.Close()
                                isBad = true
                                continue
                        }
                        if err = bufW.Flush(); err != nil {
                                tmp.Close()
-                               ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "flush")
+                               ctx.LogE("nncp-xfer", sds, err, "flush")
                                isBad = true
                                continue
                        }
                        if err = tmp.Sync(); err != nil {
                                tmp.Close()
-                               ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "sync")
+                               ctx.LogE("nncp-xfer", sds, err, "sync")
                                isBad = true
                                continue
                        }
                        tmp.Close()
                        if err = os.Rename(tmp.Name(), filepath.Join(dstPath, pktName)); err != nil {
-                               ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "rename")
+                               ctx.LogE("nncp-xfer", sds, err, "rename")
                                isBad = true
                                continue
                        }
                        if err = nncp.DirSync(dstPath); err != nil {
-                               ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "sync")
+                               ctx.LogE("nncp-xfer", sds, err, "sync")
                                isBad = true
                                continue
                        }
                        os.Remove(filepath.Join(dstPath, pktName+".part"))
                        delete(sds, "tmp")
-                       ctx.LogI("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{
-                               "size": strconv.FormatInt(copied, 10),
-                       }), "")
+                       ctx.LogI("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"size": copied}), "")
                        if !*keep {
                                if err = os.Remove(job.Fd.Name()); err != nil {
-                                       ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "remove")
+                                       ctx.LogE("nncp-xfer", sds, err, "remove")
                                        isBad = true
                                }
                        }
index 8a1146e2b5651f44731cfe7de792ff3388c906a9..19c7b9c0177fb0e0abe86497f05ede78404d3719 100644 (file)
@@ -39,6 +39,7 @@ type Ctx struct {
        LogPath    string
        UmaskForce *int
        Quiet      bool
+       ShowPrgrs  bool
        Debug      bool
        NotifyFile *FromToJSON
        NotifyFreq *FromToJSON
@@ -64,23 +65,31 @@ func (ctx *Ctx) FindNode(id string) (*Node, error) {
 func (ctx *Ctx) ensureRxDir(nodeId *NodeId) error {
        dirPath := filepath.Join(ctx.Spool, nodeId.String(), string(TRx))
        if err := os.MkdirAll(dirPath, os.FileMode(0777)); err != nil {
-               ctx.LogE("dir-ensure", SDS{"dir": dirPath, "err": err}, "")
+               ctx.LogE("dir-ensure", SDS{"dir": dirPath}, err, "")
                return err
        }
        fd, err := os.Open(dirPath)
        if err != nil {
-               ctx.LogE("dir-ensure", SDS{"dir": dirPath, "err": err}, "")
+               ctx.LogE("dir-ensure", SDS{"dir": dirPath}, err, "")
                return err
        }
        fd.Close()
        return nil
 }
 
-func CtxFromCmdline(cfgPath, spoolPath, logPath string, quiet, debug bool) (*Ctx, error) {
+func CtxFromCmdline(
+       cfgPath,
+       spoolPath,
+       logPath string,
+       quiet, showPrgrs, omitPrgrs, debug bool,
+) (*Ctx, error) {
        env := os.Getenv(CfgPathEnv)
        if env != "" {
                cfgPath = env
        }
+       if showPrgrs && omitPrgrs {
+               return nil, errors.New("simultaneous -progress and -noprogress")
+       }
        cfgRaw, err := ioutil.ReadFile(cfgPath)
        if err != nil {
                return nil, err
@@ -105,6 +114,12 @@ func CtxFromCmdline(cfgPath, spoolPath, logPath string, quiet, debug bool) (*Ctx
        } else {
                ctx.LogPath = logPath
        }
+       if showPrgrs {
+               ctx.ShowPrgrs = true
+       }
+       if quiet || omitPrgrs {
+               ctx.ShowPrgrs = false
+       }
        ctx.Quiet = quiet
        ctx.Debug = debug
        return ctx, nil
index 14b615fbfdcc8369872aed37183d71d72c25b003..8ac19ea11ae7d44a62ccb1eea2a33256cab5ad54 100644 (file)
@@ -227,7 +227,7 @@ func (ctx *Ctx) Humanize(s string) string {
                }
                msg = fmt.Sprintf(
                        "Packet %s (%s) (nice %s)",
-                       sds["hash"],
+                       sds["pkt"],
                        size,
                        NicenessFmt(nice),
                )
@@ -254,7 +254,7 @@ func (ctx *Ctx) Humanize(s string) string {
                }
                msg += fmt.Sprintf("%s packets, %s", sds["pkts"], size)
        case "sp-process":
-               msg = fmt.Sprintf("%s has %s (%s): %s", nodeS, sds["hash"], size, rem)
+               msg = fmt.Sprintf("%s has %s (%s): %s", nodeS, sds["pkt"], size, rem)
        case "sp-file":
                switch sds["xx"] {
                case "rx":
@@ -274,7 +274,7 @@ func (ctx *Ctx) Humanize(s string) string {
                }
                msg += fmt.Sprintf(
                        "%s %d%% (%s / %s)",
-                       sds["hash"],
+                       sds["pkt"],
                        100*sizeParsed/fullsize,
                        humanize.IBytes(uint64(sizeParsed)),
                        humanize.IBytes(uint64(fullsize)),
@@ -282,9 +282,9 @@ func (ctx *Ctx) Humanize(s string) string {
        case "sp-done":
                switch sds["xx"] {
                case "rx":
-                       msg = fmt.Sprintf("Packet %s is retreived (%s)", sds["hash"], size)
+                       msg = fmt.Sprintf("Packet %s is retreived (%s)", sds["pkt"], size)
                case "tx":
-                       msg = fmt.Sprintf("Packet %s is sent", sds["hash"])
+                       msg = fmt.Sprintf("Packet %s is sent", sds["pkt"])
                default:
                        return s
                }
index 49df07b96e8b8c1f56445a040851644ff7fa18e6..705bb7e551f09c68e04e922a4570ba6e81b9c31b 100644 (file)
@@ -21,7 +21,6 @@ import (
        "io"
        "os"
        "path/filepath"
-       "strconv"
 
        xdr "github.com/davecgh/go-xdr/xdr2"
 )
@@ -73,8 +72,8 @@ func (ctx *Ctx) Jobs(nodeId *NodeId, xx TRxTx) chan Job {
                                "xx":   string(xx),
                                "node": pktEnc.Sender,
                                "name": fi.Name(),
-                               "nice": strconv.Itoa(int(pktEnc.Nice)),
-                               "size": strconv.FormatInt(fi.Size(), 10),
+                               "nice": int(pktEnc.Nice),
+                               "size": fi.Size(),
                        }, "taken")
                        job := Job{
                                PktEnc:   &pktEnc,
index 32f9f6465efd9e3fe04f35452a3014f91392b9bb..50e90f4dfe0b33ddace932e657d0b100361b18c8 100644 (file)
@@ -33,12 +33,12 @@ func (ctx *Ctx) LockDir(nodeId *NodeId, xx TRxTx) (*os.File, error) {
                os.FileMode(0666),
        )
        if err != nil {
-               ctx.LogE("lockdir", SDS{"path": lockPath, "err": err}, "")
+               ctx.LogE("lockdir", SDS{"path": lockPath}, err, "")
                return nil, err
        }
        err = unix.Flock(int(dirLock.Fd()), unix.LOCK_EX|unix.LOCK_NB)
        if err != nil {
-               ctx.LogE("lockdir", SDS{"path": lockPath, "err": err}, "")
+               ctx.LogE("lockdir", SDS{"path": lockPath}, err, "")
                dirLock.Close()
                return nil, err
        }
index b83db340170668ead8821ac96f67ea2903e8ebee..bc91043e2198352f26c4c13249da790885c59284 100644 (file)
@@ -40,7 +40,14 @@ func sdFmt(who string, sds SDS) string {
        result := make([]string, 0, 1+len(keys))
        result = append(result, "["+who)
        for _, k := range keys {
-               result = append(result, fmt.Sprintf(`%s="%s"`, k, sds[k]))
+               var value string
+               switch v := sds[k].(type) {
+               case int, int8, uint8, int64, uint64:
+                       value = fmt.Sprintf("%d", v)
+               default:
+                       value = fmt.Sprintf("%s", v)
+               }
+               result = append(result, fmt.Sprintf(`%s="%s"`, k, value))
        }
        return strings.Join(result, " ") + "]"
 }
@@ -103,13 +110,8 @@ func (ctx *Ctx) LogI(who string, sds SDS, msg string) {
        ctx.Log(msg)
 }
 
-func (ctx *Ctx) LogP(who string, sds SDS, msg string) {
-       if !ctx.Quiet {
-               fmt.Fprintln(os.Stderr, ctx.Humanize(msgFmt(LogLevel("P"), who, sds, msg)))
-       }
-}
-
-func (ctx *Ctx) LogE(who string, sds SDS, msg string) {
+func (ctx *Ctx) LogE(who string, sds SDS, err error, msg string) {
+       sds["err"] = err.Error()
        msg = msgFmt(LogLevel("E"), who, sds, msg)
        if len(msg) > 2048 {
                msg = msg[:2048]
diff --git a/src/progress.go b/src/progress.go
new file mode 100644 (file)
index 0000000..d071857
--- /dev/null
@@ -0,0 +1,151 @@
+/*
+NNCP -- Node to Node copy, utilities for store-and-forward data exchange
+Copyright (C) 2016-2019 Sergey Matveev <stargrave@stargrave.org>
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, version 3 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program.  If not, see <http://www.gnu.org/licenses/>.
+*/
+
+package nncp
+
+import (
+       "fmt"
+       "io"
+       "os"
+       "strings"
+       "sync"
+       "time"
+
+       "github.com/dustin/go-humanize"
+       "go.cypherpunks.ru/nncp/v5/uilive"
+)
+
// init routes all uilive (progress bar) output to stderr.
func init() {
	uilive.Out = os.Stderr
}
+
// progressBars maps a packet hash (the "pkt" SDS key) to its live bar.
// progressBarsLock protects all access to the map.
var progressBars = make(map[string]*ProgressBar)
var progressBarsLock sync.RWMutex
+
// ProgressBar bundles a uilive writer with the bookkeeping needed to
// render transfer progress for a single packet.
type ProgressBar struct {
	w       *uilive.Writer // live-updating terminal writer
	hash    string         // NOTE(review): never assigned in this file -- confirm it is used elsewhere
	started time.Time      // creation time, used for transfer-rate computation
	initial int64          // bytes already transferred when the bar was created
	full    int64          // expected total size in bytes
}
+
+func ProgressBarNew(initial, full int64) *ProgressBar {
+       pb := ProgressBar{
+               w:       uilive.New(),
+               started: time.Now(),
+               initial: initial,
+               full:    full,
+       }
+       pb.w.Start()
+       return &pb
+}
+
+func (pb ProgressBar) Render(what string, size int64) {
+       now := time.Now().UTC()
+       timeDiff := now.Sub(pb.started).Seconds()
+       if timeDiff == 0 {
+               timeDiff = 1
+       }
+       percentage := int64(100)
+       if pb.full > 0 {
+               percentage = 100 * size / pb.full
+       }
+       fmt.Fprintf(
+               pb.w, "%s %s %s/%s %d%% (%s/sec)\n",
+               now.Format(time.RFC3339), what,
+               humanize.IBytes(uint64(size)),
+               humanize.IBytes(uint64(pb.full)),
+               percentage,
+               humanize.IBytes(uint64(float64(size-pb.initial)/timeDiff)),
+       )
+}
+
// Kill stops the underlying live writer, finalizing the bar's output.
func (pb ProgressBar) Kill() {
	pb.w.Stop()
}
+
+func CopyProgressed(
+       dst io.Writer,
+       src io.Reader,
+       sds SDS,
+       showPrgrs bool,
+) (written int64, err error) {
+       buf := make([]byte, EncBlkSize)
+       var nr, nw int
+       var er, ew error
+       for {
+               nr, er = src.Read(buf)
+               if nr > 0 {
+                       nw, ew = dst.Write(buf[:nr])
+                       if nw > 0 {
+                               written += int64(nw)
+                               if showPrgrs {
+                                       sds["size"] = written
+                                       Progress(sds)
+                               }
+                       }
+                       if ew != nil {
+                               err = ew
+                               break
+                       }
+                       if nr != nw {
+                               err = io.ErrShortWrite
+                               break
+                       }
+               }
+               if er != nil {
+                       if er != io.EOF {
+                               err = er
+                       }
+                       break
+               }
+       }
+       return
+}
+
+func Progress(sds SDS) {
+       pkt := sds["pkt"].(string)
+       var size int64
+       if sizeI, exists := sds["size"]; exists {
+               size = sizeI.(int64)
+       }
+       fullsize := sds["fullsize"].(int64)
+       progressBarsLock.RLock()
+       pb, exists := progressBars[pkt]
+       progressBarsLock.RUnlock()
+       if !exists {
+               progressBarsLock.Lock()
+               pb = ProgressBarNew(size, fullsize)
+               progressBars[pkt] = pb
+               progressBarsLock.Unlock()
+       }
+       what := pkt
+       if len(what) >= 52 { // Base32 encoded
+               what = what[:16] + ".." + what[len(what)-16:]
+       }
+       if xx, exists := sds["xx"]; exists {
+               what = strings.Title(xx.(string)) + " " + what
+       }
+       pb.Render(what, size)
+       if size >= fullsize {
+               pb.Kill()
+               progressBarsLock.Lock()
+               delete(progressBars, pkt)
+               progressBarsLock.Unlock()
+       }
+}
index 7b8fc3113b2db7d2111ba68bf7c1f0c346bc98cf..ce2e62f5d86be3492fc7a4192bca978c9c62b760 100644 (file)
--- a/src/sp.go
+++ b/src/sp.go
@@ -26,7 +26,6 @@ import (
        "os"
        "path/filepath"
        "sort"
-       "strconv"
        "sync"
        "time"
 
@@ -271,15 +270,15 @@ func (ctx *Ctx) infosOur(nodeId *NodeId, nice uint8, seen *map[[32]byte]uint8) [
                ctx.LogD("sp-info-our", SDS{
                        "node": nodeId,
                        "name": ToBase32(info.Hash[:]),
-                       "size": strconv.FormatInt(int64(info.Size), 10),
+                       "size": info.Size,
                }, "")
        }
        if totalSize > 0 {
                ctx.LogI("sp-infos", SDS{
                        "xx":   string(TTx),
                        "node": nodeId,
-                       "pkts": strconv.Itoa(len(payloads)),
-                       "size": strconv.FormatInt(totalSize, 10),
+                       "pkts": len(payloads),
+                       "size": totalSize,
                }, "")
        }
        return payloadsSplit(payloads)
@@ -348,31 +347,31 @@ func (state *SPState) StartI(conn ConnDeadlined) error {
                state.dirUnlock()
                return err
        }
-       sds := SDS{"node": nodeId, "nice": strconv.Itoa(int(state.Nice))}
+       sds := SDS{"node": nodeId, "nice": int(state.Nice)}
        state.Ctx.LogD("sp-start", sds, "sending first message")
        conn.SetWriteDeadline(time.Now().Add(DefaultDeadline * time.Second))
        if err = state.WriteSP(conn, buf); err != nil {
-               state.Ctx.LogE("sp-start", SdsAdd(sds, SDS{"err": err}), "")
+               state.Ctx.LogE("sp-start", sds, err, "")
                state.dirUnlock()
                return err
        }
        state.Ctx.LogD("sp-start", sds, "waiting for first message")
        conn.SetReadDeadline(time.Now().Add(DefaultDeadline * time.Second))
        if buf, err = state.ReadSP(conn); err != nil {
-               state.Ctx.LogE("sp-start", SdsAdd(sds, SDS{"err": err}), "")
+               state.Ctx.LogE("sp-start", sds, err, "")
                state.dirUnlock()
                return err
        }
        payload, state.csOur, state.csTheir, err = state.hs.ReadMessage(nil, buf)
        if err != nil {
-               state.Ctx.LogE("sp-start", SdsAdd(sds, SDS{"err": err}), "")
+               state.Ctx.LogE("sp-start", sds, err, "")
                state.dirUnlock()
                return err
        }
        state.Ctx.LogD("sp-start", sds, "starting workers")
        err = state.StartWorkers(conn, infosPayloads, payload)
        if err != nil {
-               state.Ctx.LogE("sp-start", SdsAdd(sds, SDS{"err": err}), "")
+               state.Ctx.LogE("sp-start", sds, err, "")
                state.dirUnlock()
                return err
        }
@@ -403,18 +402,14 @@ func (state *SPState) StartR(conn ConnDeadlined) error {
        state.xxOnly = xxOnly
        var buf []byte
        var payload []byte
-       state.Ctx.LogD(
-               "sp-start",
-               SDS{"nice": strconv.Itoa(int(state.Nice))},
-               "waiting for first message",
-       )
+       state.Ctx.LogD("sp-start", SDS{"nice": int(state.Nice)}, "waiting for first message")
        conn.SetReadDeadline(time.Now().Add(DefaultDeadline * time.Second))
        if buf, err = state.ReadSP(conn); err != nil {
-               state.Ctx.LogE("sp-start", SDS{"err": err}, "")
+               state.Ctx.LogE("sp-start", SDS{}, err, "")
                return err
        }
        if payload, _, _, err = state.hs.ReadMessage(nil, buf); err != nil {
-               state.Ctx.LogE("sp-start", SDS{"err": err}, "")
+               state.Ctx.LogE("sp-start", SDS{}, err, "")
                return err
        }
 
@@ -427,7 +422,7 @@ func (state *SPState) StartR(conn ConnDeadlined) error {
        }
        if node == nil {
                peerId := ToBase32(state.hs.PeerStatic())
-               state.Ctx.LogE("sp-start", SDS{"peer": peerId}, "unknown")
+               state.Ctx.LogE("sp-start", SDS{"peer": peerId}, errors.New("unknown"), "")
                return errors.New("Unknown peer: " + peerId)
        }
        state.Node = node
@@ -435,7 +430,7 @@ func (state *SPState) StartR(conn ConnDeadlined) error {
        state.txRate = node.TxRate
        state.onlineDeadline = node.OnlineDeadline
        state.maxOnlineTime = node.MaxOnlineTime
-       sds := SDS{"node": node.Id, "nice": strconv.Itoa(int(state.Nice))}
+       sds := SDS{"node": node.Id, "nice": int(state.Nice)}
 
        if state.Ctx.ensureRxDir(node.Id); err != nil {
                return err
@@ -478,7 +473,7 @@ func (state *SPState) StartR(conn ConnDeadlined) error {
        }
        conn.SetWriteDeadline(time.Now().Add(DefaultDeadline * time.Second))
        if err = state.WriteSP(conn, buf); err != nil {
-               state.Ctx.LogE("sp-start", SdsAdd(sds, SDS{"err": err}), "")
+               state.Ctx.LogE("sp-start", sds, err, "")
                state.dirUnlock()
                return err
        }
@@ -495,13 +490,13 @@ func (state *SPState) StartWorkers(
        conn ConnDeadlined,
        infosPayloads [][]byte,
        payload []byte) error {
-       sds := SDS{"node": state.Node.Id, "nice": strconv.Itoa(int(state.Nice))}
+       sds := SDS{"node": state.Node.Id, "nice": int(state.Nice)}
        if len(infosPayloads) > 1 {
                go func() {
                        for _, payload := range infosPayloads[1:] {
                                state.Ctx.LogD(
                                        "sp-work",
-                                       SdsAdd(sds, SDS{"size": strconv.Itoa(len(payload))}),
+                                       SdsAdd(sds, SDS{"size": len(payload)}),
                                        "queuing remaining payload",
                                )
                                state.payloads <- payload
@@ -510,12 +505,12 @@ func (state *SPState) StartWorkers(
        }
        state.Ctx.LogD(
                "sp-work",
-               SdsAdd(sds, SDS{"size": strconv.Itoa(len(payload))}),
+               SdsAdd(sds, SDS{"size": len(payload)}),
                "processing first payload",
        )
        replies, err := state.ProcessSP(payload)
        if err != nil {
-               state.Ctx.LogE("sp-work", SdsAdd(sds, SDS{"err": err}), "")
+               state.Ctx.LogE("sp-work", sds, err, "")
                return err
        }
 
@@ -523,7 +518,7 @@ func (state *SPState) StartWorkers(
                for _, reply := range replies {
                        state.Ctx.LogD(
                                "sp-work",
-                               SdsAdd(sds, SDS{"size": strconv.Itoa(len(reply))}),
+                               SdsAdd(sds, SDS{"size": len(reply)}),
                                "queuing reply",
                        )
                        state.payloads <- reply
@@ -543,7 +538,7 @@ func (state *SPState) StartWorkers(
                                ) {
                                        state.Ctx.LogD(
                                                "sp-work",
-                                               SdsAdd(sds, SDS{"size": strconv.Itoa(len(payload))}),
+                                               SdsAdd(sds, SDS{"size": len(payload)}),
                                                "queuing new info",
                                        )
                                        state.payloads <- payload
@@ -567,7 +562,7 @@ func (state *SPState) StartWorkers(
                        case payload = <-state.payloads:
                                state.Ctx.LogD(
                                        "sp-xmit",
-                                       SdsAdd(sds, SDS{"size": strconv.Itoa(len(payload))}),
+                                       SdsAdd(sds, SDS{"size": len(payload)}),
                                        "got payload",
                                )
                        default:
@@ -589,8 +584,8 @@ func (state *SPState) StartWorkers(
 
                                sdsp := SdsAdd(sds, SDS{
                                        "xx":   string(TTx),
-                                       "hash": ToBase32(freq.Hash[:]),
-                                       "size": strconv.FormatInt(int64(freq.Offset), 10),
+                                       "pkt":  ToBase32(freq.Hash[:]),
+                                       "size": int64(freq.Offset),
                                })
                                state.Ctx.LogD("sp-file", sdsp, "queueing")
                                fd, err := os.Open(filepath.Join(
@@ -600,32 +595,32 @@ func (state *SPState) StartWorkers(
                                        ToBase32(freq.Hash[:]),
                                ))
                                if err != nil {
-                                       state.Ctx.LogE("sp-file", SdsAdd(sdsp, SDS{"err": err}), "")
+                                       state.Ctx.LogE("sp-file", sdsp, err, "")
                                        break
                                }
                                fi, err := fd.Stat()
                                if err != nil {
-                                       state.Ctx.LogE("sp-file", SdsAdd(sdsp, SDS{"err": err}), "")
+                                       state.Ctx.LogE("sp-file", sdsp, err, "")
                                        break
                                }
-                               fullSize := uint64(fi.Size())
+                               fullSize := fi.Size()
                                var buf []byte
-                               if freq.Offset < fullSize {
+                               if freq.Offset < uint64(fullSize) {
                                        state.Ctx.LogD("sp-file", sdsp, "seeking")
                                        if _, err = fd.Seek(int64(freq.Offset), io.SeekStart); err != nil {
-                                               state.Ctx.LogE("sp-file", SdsAdd(sdsp, SDS{"err": err}), "")
+                                               state.Ctx.LogE("sp-file", sdsp, err, "")
                                                break
                                        }
                                        buf = make([]byte, MaxSPSize-SPHeadOverhead-SPFileOverhead)
                                        n, err := fd.Read(buf)
                                        if err != nil {
-                                               state.Ctx.LogE("sp-file", SdsAdd(sdsp, SDS{"err": err}), "")
+                                               state.Ctx.LogE("sp-file", sdsp, err, "")
                                                break
                                        }
                                        buf = buf[:n]
                                        state.Ctx.LogD(
                                                "sp-file",
-                                               SdsAdd(sdsp, SDS{"size": strconv.Itoa(n)}),
+                                               SdsAdd(sdsp, SDS{"size": n}),
                                                "read",
                                        )
                                }
@@ -636,12 +631,14 @@ func (state *SPState) StartWorkers(
                                        Payload: buf,
                                })
                                ourSize := freq.Offset + uint64(len(buf))
-                               sdsp["size"] = strconv.FormatInt(int64(ourSize), 10)
-                               sdsp["fullsize"] = strconv.FormatInt(int64(fullSize), 10)
-                               state.Ctx.LogP("sp-file", sdsp, "")
+                               sdsp["size"] = int64(ourSize)
+                               sdsp["fullsize"] = fullSize
+                               if state.Ctx.ShowPrgrs {
+                                       Progress(sdsp)
+                               }
                                state.Lock()
                                if len(state.queueTheir) > 0 && *state.queueTheir[0].freq.Hash == *freq.Hash {
-                                       if ourSize == fullSize {
+                                       if ourSize == uint64(fullSize) {
                                                state.Ctx.LogD("sp-file", sdsp, "finished")
                                                if len(state.queueTheir) > 1 {
                                                        state.queueTheir = state.queueTheir[1:]
@@ -658,12 +655,12 @@ func (state *SPState) StartWorkers(
                        }
                        state.Ctx.LogD(
                                "sp-xmit",
-                               SdsAdd(sds, SDS{"size": strconv.Itoa(len(payload))}),
+                               SdsAdd(sds, SDS{"size": len(payload)}),
                                "sending",
                        )
                        conn.SetWriteDeadline(time.Now().Add(DefaultDeadline * time.Second))
                        if err := state.WriteSP(conn, state.csOur.Encrypt(nil, nil, payload)); err != nil {
-                               state.Ctx.LogE("sp-xmit", SdsAdd(sds, SDS{"err": err}), "")
+                               state.Ctx.LogE("sp-xmit", sds, err, "")
                                break
                        }
                }
@@ -691,34 +688,34 @@ func (state *SPState) StartWorkers(
                                if unmarshalErr.ErrorCode == xdr.ErrIO {
                                        break
                                }
-                               state.Ctx.LogE("sp-recv", SdsAdd(sds, SDS{"err": err}), "")
+                               state.Ctx.LogE("sp-recv", sds, err, "")
                                break
                        }
                        state.Ctx.LogD(
                                "sp-recv",
-                               SdsAdd(sds, SDS{"size": strconv.Itoa(len(payload))}),
+                               SdsAdd(sds, SDS{"size": len(payload)}),
                                "got payload",
                        )
                        payload, err = state.csTheir.Decrypt(nil, nil, payload)
                        if err != nil {
-                               state.Ctx.LogE("sp-recv", SdsAdd(sds, SDS{"err": err}), "")
+                               state.Ctx.LogE("sp-recv", sds, err, "")
                                break
                        }
                        state.Ctx.LogD(
                                "sp-recv",
-                               SdsAdd(sds, SDS{"size": strconv.Itoa(len(payload))}),
+                               SdsAdd(sds, SDS{"size": len(payload)}),
                                "processing",
                        )
                        replies, err := state.ProcessSP(payload)
                        if err != nil {
-                               state.Ctx.LogE("sp-recv", SdsAdd(sds, SDS{"err": err}), "")
+                               state.Ctx.LogE("sp-recv", sds, err, "")
                                break
                        }
                        go func() {
                                for _, reply := range replies {
                                        state.Ctx.LogD(
                                                "sp-recv",
-                                               SdsAdd(sds, SDS{"size": strconv.Itoa(len(reply))}),
+                                               SdsAdd(sds, SDS{"size": len(reply)}),
                                                "queuing reply",
                                        )
                                        state.payloads <- reply
@@ -750,7 +747,7 @@ func (state *SPState) Wait() {
 }
 
 func (state *SPState) ProcessSP(payload []byte) ([][]byte, error) {
-       sds := SDS{"node": state.Node.Id, "nice": strconv.Itoa(int(state.Nice))}
+       sds := SDS{"node": state.Node.Id, "nice": int(state.Nice)}
        r := bytes.NewReader(payload)
        var err error
        var replies [][]byte
@@ -759,7 +756,7 @@ func (state *SPState) ProcessSP(payload []byte) ([][]byte, error) {
                state.Ctx.LogD("sp-process", sds, "unmarshaling header")
                var head SPHead
                if _, err = xdr.Unmarshal(r, &head); err != nil {
-                       state.Ctx.LogE("sp-process", SdsAdd(sds, SDS{"err": err}), "")
+                       state.Ctx.LogE("sp-process", sds, err, "")
                        return nil, err
                }
                switch head.Type {
@@ -769,13 +766,13 @@ func (state *SPState) ProcessSP(payload []byte) ([][]byte, error) {
                        state.Ctx.LogD("sp-process", sdsp, "unmarshaling packet")
                        var info SPInfo
                        if _, err = xdr.Unmarshal(r, &info); err != nil {
-                               state.Ctx.LogE("sp-process", SdsAdd(sdsp, SDS{"err": err}), "")
+                               state.Ctx.LogE("sp-process", sdsp, err, "")
                                return nil, err
                        }
                        sdsp = SdsAdd(sds, SDS{
-                               "hash": ToBase32(info.Hash[:]),
-                               "size": strconv.FormatInt(int64(info.Size), 10),
-                               "nice": strconv.Itoa(int(info.Nice)),
+                               "pkt":  ToBase32(info.Hash[:]),
+                               "size": int64(info.Size),
+                               "nice": int(info.Nice),
                        })
                        if !state.listOnly && info.Nice > state.Nice {
                                state.Ctx.LogD("sp-process", sdsp, "too nice")
@@ -820,7 +817,7 @@ func (state *SPState) ProcessSP(payload []byte) ([][]byte, error) {
                        }
                        state.Ctx.LogI(
                                "sp-info",
-                               SdsAdd(sdsp, SDS{"offset": strconv.FormatInt(offset, 10)}),
+                               SdsAdd(sdsp, SDS{"offset": offset}),
                                "",
                        )
                        if !state.listOnly && (state.onlyPkts == nil || state.onlyPkts[*info.Hash]) {
@@ -834,15 +831,12 @@ func (state *SPState) ProcessSP(payload []byte) ([][]byte, error) {
                        state.Ctx.LogD("sp-process", sdsp, "unmarshaling packet")
                        var file SPFile
                        if _, err = xdr.Unmarshal(r, &file); err != nil {
-                               state.Ctx.LogE("sp-process", SdsAdd(sds, SDS{
-                                       "err":  err,
-                                       "type": "file",
-                               }), "")
+                               state.Ctx.LogE("sp-process", SdsAdd(sds, SDS{"type": "file"}), err, "")
                                return nil, err
                        }
                        sdsp["xx"] = string(TRx)
-                       sdsp["hash"] = ToBase32(file.Hash[:])
-                       sdsp["size"] = strconv.Itoa(len(file.Payload))
+                       sdsp["pkt"] = ToBase32(file.Hash[:])
+                       sdsp["size"] = len(file.Payload)
                        dirToSync := filepath.Join(
                                state.Ctx.Spool,
                                state.Node.Id.String(),
@@ -856,31 +850,33 @@ func (state *SPState) ProcessSP(payload []byte) ([][]byte, error) {
                                os.FileMode(0666),
                        )
                        if err != nil {
-                               state.Ctx.LogE("sp-file", SdsAdd(sdsp, SDS{"err": err}), "")
+                               state.Ctx.LogE("sp-file", sdsp, err, "")
                                return nil, err
                        }
                        state.Ctx.LogD(
                                "sp-file",
-                               SdsAdd(sdsp, SDS{"offset": strconv.FormatInt(int64(file.Offset), 10)}),
+                               SdsAdd(sdsp, SDS{"offset": file.Offset}),
                                "seeking",
                        )
                        if _, err = fd.Seek(int64(file.Offset), io.SeekStart); err != nil {
-                               state.Ctx.LogE("sp-file", SdsAdd(sdsp, SDS{"err": err}), "")
+                               state.Ctx.LogE("sp-file", sdsp, err, "")
                                fd.Close()
                                return nil, err
                        }
                        state.Ctx.LogD("sp-file", sdsp, "writing")
                        _, err = fd.Write(file.Payload)
                        if err != nil {
-                               state.Ctx.LogE("sp-file", SdsAdd(sdsp, SDS{"err": err}), "")
+                               state.Ctx.LogE("sp-file", sdsp, err, "")
                                fd.Close()
                                return nil, err
                        }
-                       ourSize := uint64(file.Offset) + uint64(len(file.Payload))
+                       ourSize := file.Offset + uint64(len(file.Payload))
                        state.RLock()
-                       sdsp["fullsize"] = strconv.FormatInt(int64(state.infosTheir[*file.Hash].Size), 10)
-                       sdsp["size"] = strconv.FormatInt(int64(ourSize), 10)
-                       state.Ctx.LogP("sp-file", sdsp, "")
+                       sdsp["size"] = int64(ourSize)
+                       sdsp["fullsize"] = int64(state.infosTheir[*file.Hash].Size)
+                       if state.Ctx.ShowPrgrs {
+                               Progress(sdsp)
+                       }
                        if state.infosTheir[*file.Hash].Size != ourSize {
                                state.RUnlock()
                                fd.Close()
@@ -891,7 +887,7 @@ func (state *SPState) ProcessSP(payload []byte) ([][]byte, error) {
                        spWorkersGroup.Add(1)
                        go func() {
                                if err := fd.Sync(); err != nil {
-                                       state.Ctx.LogE("sp-file", SdsAdd(sdsp, SDS{"err": err}), "sync")
+                                       state.Ctx.LogE("sp-file", sdsp, err, "sync")
                                        fd.Close()
                                        return
                                }
@@ -899,19 +895,19 @@ func (state *SPState) ProcessSP(payload []byte) ([][]byte, error) {
                                defer state.wg.Done()
                                fd.Seek(0, io.SeekStart)
                                state.Ctx.LogD("sp-file", sdsp, "checking")
-                               gut, err := Check(fd, file.Hash[:])
+                               gut, err := Check(fd, file.Hash[:], sdsp, state.Ctx.ShowPrgrs)
                                fd.Close()
                                if err != nil || !gut {
-                                       state.Ctx.LogE("sp-file", sdsp, "checksum mismatch")
+                                       state.Ctx.LogE("sp-file", sdsp, errors.New("checksum mismatch"), "")
                                        return
                                }
                                state.Ctx.LogI("sp-done", SdsAdd(sdsp, SDS{"xx": string(TRx)}), "")
                                if err = os.Rename(filePath+PartSuffix, filePath); err != nil {
-                                       state.Ctx.LogE("sp-file", SdsAdd(sdsp, SDS{"err": err}), "rename")
+                                       state.Ctx.LogE("sp-file", sdsp, err, "rename")
                                        return
                                }
                                if err = DirSync(dirToSync); err != nil {
-                                       state.Ctx.LogE("sp-file", SdsAdd(sdsp, SDS{"err": err}), "sync")
+                                       state.Ctx.LogE("sp-file", sdsp, err, "sync")
                                        return
                                }
                                state.Lock()
@@ -927,13 +923,10 @@ func (state *SPState) ProcessSP(payload []byte) ([][]byte, error) {
                        state.Ctx.LogD("sp-process", sdsp, "unmarshaling packet")
                        var done SPDone
                        if _, err = xdr.Unmarshal(r, &done); err != nil {
-                               state.Ctx.LogE("sp-process", SdsAdd(sds, SDS{
-                                       "type": "done",
-                                       "err":  err,
-                               }), "")
+                               state.Ctx.LogE("sp-process", SdsAdd(sds, SDS{"type": "done"}), err, "")
                                return nil, err
                        }
-                       sdsp["hash"] = ToBase32(done.Hash[:])
+                       sdsp["pkt"] = ToBase32(done.Hash[:])
                        state.Ctx.LogD("sp-done", sdsp, "removing")
                        err := os.Remove(filepath.Join(
                                state.Ctx.Spool,
@@ -945,18 +938,18 @@ func (state *SPState) ProcessSP(payload []byte) ([][]byte, error) {
                        if err == nil {
                                state.Ctx.LogI("sp-done", sdsp, "")
                        } else {
-                               state.Ctx.LogE("sp-done", sdsp, "")
+                               state.Ctx.LogE("sp-done", sdsp, err, "")
                        }
                case SPTypeFreq:
                        sdsp := SdsAdd(sds, SDS{"type": "freq"})
                        state.Ctx.LogD("sp-process", sdsp, "unmarshaling packet")
                        var freq SPFreq
                        if _, err = xdr.Unmarshal(r, &freq); err != nil {
-                               state.Ctx.LogE("sp-process", SdsAdd(sdsp, SDS{"err": err}), "")
+                               state.Ctx.LogE("sp-process", sdsp, err, "")
                                return nil, err
                        }
-                       sdsp["hash"] = ToBase32(freq.Hash[:])
-                       sdsp["offset"] = strconv.FormatInt(int64(freq.Offset), 10)
+                       sdsp["pkt"] = ToBase32(freq.Hash[:])
+                       sdsp["offset"] = freq.Offset
                        state.Ctx.LogD("sp-process", sdsp, "queueing")
                        nice, exists := state.infosOurSeen[*freq.Hash]
                        if exists {
@@ -988,7 +981,8 @@ func (state *SPState) ProcessSP(payload []byte) ([][]byte, error) {
                        state.Ctx.LogE(
                                "sp-process",
                                SdsAdd(sds, SDS{"type": head.Type}),
-                               "unknown",
+                               errors.New("unknown type"),
+                               "",
                        )
                        return nil, BadPktType
                }
@@ -1005,8 +999,8 @@ func (state *SPState) ProcessSP(payload []byte) ([][]byte, error) {
                state.Ctx.LogI("sp-infos", SDS{
                        "xx":   string(TRx),
                        "node": state.Node.Id,
-                       "pkts": strconv.Itoa(pkts),
-                       "size": strconv.FormatInt(int64(size), 10),
+                       "pkts": pkts,
+                       "size": int64(size),
                }, "")
        }
        return payloadsSplit(replies), nil
index 5bc8b7960b79b8d2e904405b1e1aacd7282672d8..6abf278af8ab18cc61e785a80fb0c5565890bcca 100644 (file)
@@ -21,6 +21,7 @@ import (
        "bufio"
        "bytes"
        "encoding/base64"
+       "errors"
        "fmt"
        "io"
        "io/ioutil"
@@ -78,9 +79,7 @@ func (ctx *Ctx) Toss(
                pktName := filepath.Base(job.Fd.Name())
                sds := SDS{"node": job.PktEnc.Sender, "pkt": pktName}
                if job.PktEnc.Nice > nice {
-                       ctx.LogD("rx", SdsAdd(sds, SDS{
-                               "nice": strconv.Itoa(int(job.PktEnc.Nice)),
-                       }), "too nice")
+                       ctx.LogD("rx", SdsAdd(sds, SDS{"nice": int(job.PktEnc.Nice)}), "too nice")
                        continue
                }
                pipeR, pipeW := io.Pipe()
@@ -98,7 +97,7 @@ func (ctx *Ctx) Toss(
                        pipeW.Close()
                        job.Fd.Close()
                        if err != nil {
-                               ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "decryption")
+                               ctx.LogE("rx", sds, err, "decryption")
                        }
                }(job)
                var pkt Pkt
@@ -106,7 +105,7 @@ func (ctx *Ctx) Toss(
                var pktSize int64
                var pktSizeBlocks int64
                if _, err = xdr.Unmarshal(pipeR, &pkt); err != nil {
-                       ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "unmarshal")
+                       ctx.LogE("rx", sds, err, "unmarshal")
                        isBad = true
                        goto Closing
                }
@@ -116,7 +115,7 @@ func (ctx *Ctx) Toss(
                        pktSize -= poly1305.TagSize
                }
                pktSize -= pktSizeBlocks * poly1305.TagSize
-               sds["size"] = strconv.FormatInt(pktSize, 10)
+               sds["size"] = pktSize
                ctx.LogD("rx", sds, "taken")
                switch pkt.Type {
                case PktTypeExec:
@@ -137,7 +136,7 @@ func (ctx *Ctx) Toss(
                        sender := ctx.Neigh[*job.PktEnc.Sender]
                        cmdline, exists := sender.Exec[handle]
                        if !exists || len(cmdline) == 0 {
-                               ctx.LogE("rx", SdsAdd(sds, SDS{"err": "No handle found"}), "")
+                               ctx.LogE("rx", sds, errors.New("No handle found"), "")
                                isBad = true
                                goto Closing
                        }
@@ -158,7 +157,7 @@ func (ctx *Ctx) Toss(
                                cmd.Stdin = decompressor
                                output, err := cmd.Output()
                                if err != nil {
-                                       ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "handle")
+                                       ctx.LogE("rx", sds, err, "handle")
                                        isBad = true
                                        goto Closing
                                }
@@ -187,7 +186,7 @@ func (ctx *Ctx) Toss(
                                        }
                                }
                                if err = os.Remove(job.Fd.Name()); err != nil {
-                                       ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "remove")
+                                       ctx.LogE("rx", sds, err, "remove")
                                        isBad = true
                                }
                        }
@@ -198,46 +197,51 @@ func (ctx *Ctx) Toss(
                        dst := string(pkt.Path[:int(pkt.PathLen)])
                        sds := SdsAdd(sds, SDS{"type": "file", "dst": dst})
                        if filepath.IsAbs(dst) {
-                               ctx.LogE("rx", sds, "non-relative destination path")
+                               ctx.LogE("rx", sds, errors.New("non-relative destination path"), "")
                                isBad = true
                                goto Closing
                        }
                        incoming := ctx.Neigh[*job.PktEnc.Sender].Incoming
                        if incoming == nil {
-                               ctx.LogE("rx", sds, "incoming is not allowed")
+                               ctx.LogE("rx", sds, errors.New("incoming is not allowed"), "")
                                isBad = true
                                goto Closing
                        }
                        dir := filepath.Join(*incoming, path.Dir(dst))
                        if err = os.MkdirAll(dir, os.FileMode(0777)); err != nil {
-                               ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "mkdir")
+                               ctx.LogE("rx", sds, err, "mkdir")
                                isBad = true
                                goto Closing
                        }
                        if !dryRun {
                                tmp, err := TempFile(dir, "file")
                                if err != nil {
-                                       ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "mktemp")
+                                       ctx.LogE("rx", sds, err, "mktemp")
                                        isBad = true
                                        goto Closing
                                }
                                sds["tmp"] = tmp.Name()
                                ctx.LogD("rx", sds, "created")
                                bufW := bufio.NewWriter(tmp)
-                               if _, err = io.Copy(bufW, pipeR); err != nil {
-                                       ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "copy")
+                               if _, err = CopyProgressed(
+                                       bufW,
+                                       pipeR,
+                                       SdsAdd(sds, SDS{"fullsize": sds["size"]}),
+                                       ctx.ShowPrgrs,
+                               ); err != nil {
+                                       ctx.LogE("rx", sds, err, "copy")
                                        isBad = true
                                        goto Closing
                                }
                                if err = bufW.Flush(); err != nil {
                                        tmp.Close()
-                                       ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "copy")
+                                       ctx.LogE("rx", sds, err, "copy")
                                        isBad = true
                                        goto Closing
                                }
                                if err = tmp.Sync(); err != nil {
                                        tmp.Close()
-                                       ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "copy")
+                                       ctx.LogE("rx", sds, err, "copy")
                                        isBad = true
                                        goto Closing
                                }
@@ -250,7 +254,7 @@ func (ctx *Ctx) Toss(
                                                if os.IsNotExist(err) {
                                                        break
                                                }
-                                               ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "stat")
+                                               ctx.LogE("rx", sds, err, "stat")
                                                isBad = true
                                                goto Closing
                                        }
@@ -258,11 +262,11 @@ func (ctx *Ctx) Toss(
                                        dstPathCtr++
                                }
                                if err = os.Rename(tmp.Name(), dstPath); err != nil {
-                                       ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "rename")
+                                       ctx.LogE("rx", sds, err, "rename")
                                        isBad = true
                                }
                                if err = DirSync(*incoming); err != nil {
-                                       ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "sync")
+                                       ctx.LogE("rx", sds, err, "sync")
                                        isBad = true
                                }
                                delete(sds, "tmp")
@@ -275,7 +279,7 @@ func (ctx *Ctx) Toss(
                                        }
                                }
                                if err = os.Remove(job.Fd.Name()); err != nil {
-                                       ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "remove")
+                                       ctx.LogE("rx", sds, err, "remove")
                                        isBad = true
                                }
                                if len(sendmail) > 0 && ctx.NotifyFile != nil {
@@ -298,14 +302,14 @@ func (ctx *Ctx) Toss(
                        }
                        src := string(pkt.Path[:int(pkt.PathLen)])
                        if filepath.IsAbs(src) {
-                               ctx.LogE("rx", sds, "non-relative source path")
+                               ctx.LogE("rx", sds, errors.New("non-relative source path"), "")
                                isBad = true
                                goto Closing
                        }
                        sds := SdsAdd(sds, SDS{"type": "freq", "src": src})
                        dstRaw, err := ioutil.ReadAll(pipeR)
                        if err != nil {
-                               ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "read")
+                               ctx.LogE("rx", sds, err, "read")
                                isBad = true
                                goto Closing
                        }
@@ -314,7 +318,7 @@ func (ctx *Ctx) Toss(
                        sender := ctx.Neigh[*job.PktEnc.Sender]
                        freqPath := sender.FreqPath
                        if freqPath == nil {
-                               ctx.LogE("rx", sds, "freqing is not allowed")
+                               ctx.LogE("rx", sds, errors.New("freqing is not allowed"), "")
                                isBad = true
                                goto Closing
                        }
@@ -329,7 +333,7 @@ func (ctx *Ctx) Toss(
                                        sender.FreqMaxSize,
                                )
                                if err != nil {
-                                       ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "tx file")
+                                       ctx.LogE("rx", sds, err, "tx file")
                                        isBad = true
                                        goto Closing
                                }
@@ -342,7 +346,7 @@ func (ctx *Ctx) Toss(
                                        }
                                }
                                if err = os.Remove(job.Fd.Name()); err != nil {
-                                       ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "remove")
+                                       ctx.LogE("rx", sds, err, "remove")
                                        isBad = true
                                }
                                if len(sendmail) > 0 && ctx.NotifyFreq != nil {
@@ -366,14 +370,14 @@ func (ctx *Ctx) Toss(
                        node, known := ctx.Neigh[nodeId]
                        sds := SdsAdd(sds, SDS{"type": "trns", "dst": nodeId})
                        if !known {
-                               ctx.LogE("rx", sds, "unknown node")
+                               ctx.LogE("rx", sds, errors.New("unknown node"), "")
                                isBad = true
                                goto Closing
                        }
                        ctx.LogD("rx", sds, "taken")
                        if !dryRun {
                                if err = ctx.TxTrns(node, job.PktEnc.Nice, pktSize, pipeR); err != nil {
-                                       ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "tx trns")
+                                       ctx.LogE("rx", sds, err, "tx trns")
                                        isBad = true
                                        goto Closing
                                }
@@ -386,12 +390,12 @@ func (ctx *Ctx) Toss(
                                        }
                                }
                                if err = os.Remove(job.Fd.Name()); err != nil {
-                                       ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "remove")
+                                       ctx.LogE("rx", sds, err, "remove")
                                        isBad = true
                                }
                        }
                default:
-                       ctx.LogE("rx", sds, "unknown type")
+                       ctx.LogE("rx", sds, errors.New("unknown type"), "")
                        isBad = true
                }
        Closing:
index 20853b0b27a3cd4a02eab6027a2081675bf6a11f..67bb659a672426895919fcd5961633c75d455df9 100644 (file)
--- a/src/tx.go
+++ b/src/tx.go
@@ -51,6 +51,7 @@ func (ctx *Ctx) Tx(
        nice uint8,
        size, minSize int64,
        src io.Reader,
+       pktName string,
 ) (*Node, error) {
        hops := make([]*Node, 0, 1+len(node.Via))
        hops = append(hops, node)
@@ -81,8 +82,8 @@ func (ctx *Ctx) Tx(
        go func(size int64, src io.Reader, dst io.WriteCloser) {
                ctx.LogD("tx", SDS{
                        "node": hops[0].Id,
-                       "nice": strconv.Itoa(int(nice)),
-                       "size": strconv.FormatInt(size, 10),
+                       "nice": int(nice),
+                       "size": size,
                }, "wrote")
                errs <- PktEncWrite(ctx.Self, hops[0], pkt, nice, size, padSize, src, dst)
                dst.Close()
@@ -97,8 +98,8 @@ func (ctx *Ctx) Tx(
                go func(node *Node, pkt *Pkt, size int64, src io.Reader, dst io.WriteCloser) {
                        ctx.LogD("tx", SDS{
                                "node": node.Id,
-                               "nice": strconv.Itoa(int(nice)),
-                               "size": strconv.FormatInt(size, 10),
+                               "nice": int(nice),
+                               "size": size,
                        }, "trns wrote")
                        errs <- PktEncWrite(ctx.Self, node, pkt, nice, size, 0, src, dst)
                        dst.Close()
@@ -106,7 +107,11 @@ func (ctx *Ctx) Tx(
                curSize = PktEncOverhead + PktSizeOverhead + sizeWithTags(PktOverhead+curSize)
        }
        go func() {
-               _, err := io.Copy(tmp.W, pipeR)
+               _, err := CopyProgressed(
+                       tmp.W, pipeR,
+                       SDS{"xx": string(TTx), "pkt": pktName, "fullsize": curSize},
+                       ctx.ShowPrgrs,
+               )
                errs <- err
        }()
        for i := 0; i <= len(hops); i++ {
@@ -320,19 +325,19 @@ func (ctx *Ctx) TxFile(
                if err != nil {
                        return err
                }
-               _, err = ctx.Tx(node, pkt, nice, fileSize, minSize, reader)
+               _, err = ctx.Tx(node, pkt, nice, fileSize, minSize, reader, dstPath)
                sds := SDS{
                        "type": "file",
                        "node": node.Id,
-                       "nice": strconv.Itoa(int(nice)),
+                       "nice": int(nice),
                        "src":  srcPath,
                        "dst":  dstPath,
-                       "size": strconv.FormatInt(fileSize, 10),
+                       "size": fileSize,
                }
                if err == nil {
                        ctx.LogI("tx", sds, "sent")
                } else {
-                       ctx.LogE("tx", SdsAdd(sds, SDS{"err": err}), "sent")
+                       ctx.LogE("tx", sds, err, "sent")
                }
                return err
        }
@@ -375,19 +380,20 @@ func (ctx *Ctx) TxFile(
                        sizeToSend,
                        minSize,
                        io.TeeReader(reader, hsh),
+                       path,
                )
                sds := SDS{
                        "type": "file",
                        "node": node.Id,
-                       "nice": strconv.Itoa(int(nice)),
+                       "nice": int(nice),
                        "src":  srcPath,
                        "dst":  path,
-                       "size": strconv.FormatInt(sizeToSend, 10),
+                       "size": sizeToSend,
                }
                if err == nil {
                        ctx.LogI("tx", sds, "sent")
                } else {
-                       ctx.LogE("tx", SdsAdd(sds, SDS{"err": err}), "sent")
+                       ctx.LogE("tx", sds, err, "sent")
                        return err
                }
                hsh.Sum(metaPkt.Checksums[chunkNum][:0])
@@ -408,19 +414,19 @@ func (ctx *Ctx) TxFile(
                return err
        }
        metaPktSize := int64(metaBuf.Len())
-       _, err = ctx.Tx(node, pkt, nice, metaPktSize, minSize, &metaBuf)
+       _, err = ctx.Tx(node, pkt, nice, metaPktSize, minSize, &metaBuf, path)
        sds := SDS{
                "type": "file",
                "node": node.Id,
-               "nice": strconv.Itoa(int(nice)),
+               "nice": int(nice),
                "src":  srcPath,
                "dst":  path,
-               "size": strconv.FormatInt(metaPktSize, 10),
+               "size": metaPktSize,
        }
        if err == nil {
                ctx.LogI("tx", sds, "sent")
        } else {
-               ctx.LogE("tx", SdsAdd(sds, SDS{"err": err}), "sent")
+               ctx.LogE("tx", sds, err, "sent")
        }
        return err
 }
@@ -444,19 +450,19 @@ func (ctx *Ctx) TxFreq(
        }
        src := strings.NewReader(dstPath)
        size := int64(src.Len())
-       _, err = ctx.Tx(node, pkt, nice, size, minSize, src)
+       _, err = ctx.Tx(node, pkt, nice, size, minSize, src, srcPath)
        sds := SDS{
                "type":      "freq",
                "node":      node.Id,
-               "nice":      strconv.Itoa(int(nice)),
-               "replynice": strconv.Itoa(int(replyNice)),
+               "nice":      int(nice),
+               "replynice": int(replyNice),
                "src":       srcPath,
                "dst":       dstPath,
        }
        if err == nil {
                ctx.LogI("tx", sds, "sent")
        } else {
-               ctx.LogE("tx", SdsAdd(sds, SDS{"err": err}), "sent")
+               ctx.LogE("tx", sds, err, "sent")
        }
        return err
 }
@@ -492,19 +498,19 @@ func (ctx *Ctx) TxExec(
                return err
        }
        size := int64(compressed.Len())
-       _, err = ctx.Tx(node, pkt, nice, size, minSize, &compressed)
+       _, err = ctx.Tx(node, pkt, nice, size, minSize, &compressed, handle)
        sds := SDS{
                "type":      "exec",
                "node":      node.Id,
-               "nice":      strconv.Itoa(int(nice)),
-               "replynice": strconv.Itoa(int(replyNice)),
+               "nice":      int(nice),
+               "replynice": int(replyNice),
                "dst":       strings.Join(append([]string{handle}, args...), " "),
-               "size":      strconv.FormatInt(size, 10),
+               "size":      size,
        }
        if err == nil {
                ctx.LogI("tx", sds, "sent")
        } else {
-               ctx.LogE("tx", SdsAdd(sds, SDS{"err": err}), "sent")
+               ctx.LogE("tx", sds, err, "sent")
        }
        return err
 }
@@ -513,20 +519,24 @@ func (ctx *Ctx) TxTrns(node *Node, nice uint8, size int64, src io.Reader) error
        sds := SDS{
                "type": "trns",
                "node": node.Id,
-               "nice": strconv.Itoa(int(nice)),
-               "size": strconv.FormatInt(size, 10),
+               "nice": int(nice),
+               "size": size,
        }
        ctx.LogD("tx", sds, "taken")
        if !ctx.IsEnoughSpace(size) {
                err := errors.New("is not enough space")
-               ctx.LogE("tx", SdsAdd(sds, SDS{"err": err}), err.Error())
+               ctx.LogE("tx", sds, err, err.Error())
                return err
        }
        tmp, err := ctx.NewTmpFileWHash()
        if err != nil {
                return err
        }
-       if _, err = io.Copy(tmp.W, src); err != nil {
+       if _, err = CopyProgressed(tmp.W, src, SDS{
+               "xx":       string(TTx),
+               "pkt":      node.Id.String(),
+               "fullsize": size,
+       }, ctx.ShowPrgrs); err != nil {
                return err
        }
        nodePath := filepath.Join(ctx.Spool, node.Id.String())
index ca77d4c553fb19e9d303dadef6e7a85d21058bf3..46e5b1e98b12bab0a5dc2049cac62b516a928b74 100644 (file)
@@ -84,6 +84,7 @@ func TestTx(t *testing.T) {
                        int64(src.Len()),
                        int64(padSize),
                        src,
+                       "pktName",
                )
                if err != nil {
                        return false
diff --git a/src/uilive/LICENSE b/src/uilive/LICENSE
new file mode 100644 (file)
index 0000000..e436d90
--- /dev/null
@@ -0,0 +1,10 @@
+MIT License
+===========
+
+Copyright (c) 2015, Greg Osuri
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/src/uilive/README.md b/src/uilive/README.md
new file mode 100644 (file)
index 0000000..5de7d82
--- /dev/null
@@ -0,0 +1,31 @@
+# uilive [![GoDoc](https://godoc.org/github.com/gosuri/uilive?status.svg)](https://godoc.org/github.com/gosuri/uilive) [![Build Status](https://travis-ci.org/gosuri/uilive.svg?branch=master)](https://travis-ci.org/gosuri/uilive)
+
+uilive is a go library for updating terminal output in realtime. It provides a buffered [io.Writer](https://golang.org/pkg/io/#Writer) that is flushed at a timed interval. uilive powers [uiprogress](https://github.com/gosuri/uiprogress).
+
+## Usage Example
+
+Calling `uilive.New()` will create a new writer. To start rendering, simply call `writer.Start()` and update the ui by writing to the `writer`. Full source for the below example is in [example/main.go](example/main.go).
+
+```go
+writer := uilive.New()
+// start listening for updates and render
+writer.Start()
+
+for i := 0; i <= 100; i++ {
+  fmt.Fprintf(writer, "Downloading.. (%d/%d) GB\n", i, 100)
+  time.Sleep(time.Millisecond * 5)
+}
+
+fmt.Fprintln(writer, "Finished: Downloaded 100GB")
+writer.Stop() // flush and stop rendering
+```
+
+The above will render
+
+![example](doc/example.gif)
+
+## Installation
+
+```sh
+$ go get -v github.com/gosuri/uilive
+```
diff --git a/src/uilive/doc.go b/src/uilive/doc.go
new file mode 100644 (file)
index 0000000..d481098
--- /dev/null
@@ -0,0 +1,2 @@
+// Package uilive provides a writer that live updates the terminal. It provides a buffered io.Writer that is flushed at a timed interval.
+package uilive
diff --git a/src/uilive/terminal_size.go b/src/uilive/terminal_size.go
new file mode 100644 (file)
index 0000000..7947abe
--- /dev/null
@@ -0,0 +1,37 @@
+// +build !windows
+
+package uilive
+
+import (
+       "os"
+       "runtime"
+       "syscall"
+       "unsafe"
+)
+
+type windowSize struct {
+       rows uint16
+       cols uint16
+}
+
+var out *os.File
+var err error
+var sz windowSize
+
+func getTermSize() (int, int) {
+       if runtime.GOOS == "openbsd" {
+               out, err = os.OpenFile("/dev/tty", os.O_RDWR, 0)
+               if err != nil {
+                       return 0, 0
+               }
+
+       } else {
+               out, err = os.OpenFile("/dev/tty", os.O_WRONLY, 0)
+               if err != nil {
+                       return 0, 0
+               }
+       }
+       _, _, _ = syscall.Syscall(syscall.SYS_IOCTL,
+               out.Fd(), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&sz)))
+       return int(sz.cols), int(sz.rows)
+}
diff --git a/src/uilive/writer.go b/src/uilive/writer.go
new file mode 100644 (file)
index 0000000..2a4010e
--- /dev/null
@@ -0,0 +1,141 @@
+// This is a fork of github.com/gosuri/uilive for NNCP project
+// * It does not buffer all the writes, but resets the buffer
+//   just only for single latest line. Some terminals have
+//   huge CPU usage if so much data (as copied files progress)
+//   is printed
+// * By default it uses stderr
+// * By default it uses 10ms refresh period
+// * defer-s are removed for less CPU usage
+// * Removed newline/bypass related code. No Windows support
+
+package uilive
+
+import (
+       "bytes"
+       "fmt"
+       "io"
+       "os"
+       "sync"
+       "time"
+)
+
+// ESC is the ASCII code for escape character
+const ESC = 27
+
+// RefreshInterval is the default refresh interval to update the ui
+var RefreshInterval = 10 * time.Millisecond
+
+var overFlowHandled bool
+
+var termWidth int
+
+// Out is the default output writer for the Writer (stderr, per the fork notes above)
+var Out = os.Stderr
+
+// FdWriter is a writer with a file descriptor.
+type FdWriter interface {
+       io.Writer
+       Fd() uintptr
+}
+
+// Writer is a buffered writer that updates the terminal. The contents of writer will be flushed on a timed interval or when Flush is called.
+type Writer struct {
+       // Out is the writer to write to
+       Out io.Writer
+
+       // RefreshInterval is the interval at which the UI should refresh
+       RefreshInterval time.Duration
+
+       ticker *time.Ticker
+       tdone  chan struct{}
+
+       buf bytes.Buffer
+       mtx *sync.Mutex
+}
+
+// New returns a new Writer with defaults
+func New() *Writer {
+       termWidth, _ = getTermSize()
+       if termWidth != 0 {
+               overFlowHandled = true
+       }
+       return &Writer{
+               Out:             Out,
+               RefreshInterval: RefreshInterval,
+               mtx:             &sync.Mutex{},
+       }
+}
+
+// clear the line and move the cursor up
+var clear = fmt.Sprintf("%c[%dA%c[2K", ESC, 1, ESC)
+
+func (w *Writer) clearLines() {
+       fmt.Fprint(w.Out, clear)
+}
+
+// Flush writes to the out and resets the buffer. It should be called after the last call to Write to ensure that any data buffered in the Writer is written to output.
+// Any incomplete escape sequence at the end is considered complete for formatting purposes.
+// An error is returned if the contents of the buffer cannot be written to the underlying output stream
+func (w *Writer) Flush() (err error) {
+       w.mtx.Lock()
+       // do nothing if buffer is empty
+       if len(w.buf.Bytes()) == 0 {
+               w.mtx.Unlock()
+               return
+       }
+       w.clearLines()
+       var currentLine bytes.Buffer
+       for _, b := range w.buf.Bytes() {
+               if b == '\n' {
+                       currentLine.Reset()
+               } else {
+                       currentLine.Write([]byte{b})
+                       if overFlowHandled && currentLine.Len() > termWidth {
+                               currentLine.Reset()
+                       }
+               }
+       }
+       _, err = w.Out.Write(w.buf.Bytes())
+       w.mtx.Unlock()
+       return
+}
+
+// Start starts the listener in a non-blocking manner
+func (w *Writer) Start() {
+       w.ticker = time.NewTicker(w.RefreshInterval)
+       w.tdone = make(chan struct{}, 0)
+       w.Out.Write([]byte("\n"))
+       go w.Listen()
+}
+
+// Stop stops the listener that updates the terminal
+func (w *Writer) Stop() {
+       w.Flush()
+       close(w.tdone)
+}
+
+// Listen listens for updates to the writer's buffer and flushes to the out provided. It blocks the runtime.
+func (w *Writer) Listen() {
+       for {
+               select {
+               case <-w.ticker.C:
+                       if w.ticker != nil {
+                               w.Flush()
+                       }
+               case <-w.tdone:
+                       w.mtx.Lock()
+                       w.ticker.Stop()
+                       w.mtx.Unlock()
+                       return
+               }
+       }
+}
+
+// Write saves the contents of buf to the writer's internal buffer, replacing any previous contents. The only errors returned are ones encountered while writing to the underlying buffer.
+func (w *Writer) Write(buf []byte) (n int, err error) {
+       w.mtx.Lock()
+       w.buf.Reset()
+       n, err = w.buf.Write(buf)
+       w.mtx.Unlock()
+       return
+}