Cypherpunks.ru repositories - nncp.git/commitdiff
Merge branch 'develop' v8.0.0
author Sergey Matveev <stargrave@stargrave.org>
Mon, 8 Nov 2021 11:05:45 +0000 (14:05 +0300)
committer Sergey Matveev <stargrave@stargrave.org>
Mon, 8 Nov 2021 11:05:45 +0000 (14:05 +0300)
44 files changed:
doc/cmd/nncp-exec.texi
doc/cmd/nncp-file.texi
doc/download.texi
doc/install.texi
doc/integration/warc.texi
doc/news.ru.texi
doc/news.texi
makedist.sh
src/cmd/nncp-bundle/main.go
src/cmd/nncp-call/main.go
src/cmd/nncp-caller/main.go
src/cmd/nncp-cfgdir/main.go
src/cmd/nncp-cfgenc/main.go
src/cmd/nncp-cfgmin/main.go
src/cmd/nncp-cfgnew/main.go
src/cmd/nncp-check/main.go
src/cmd/nncp-cronexpr/main.go
src/cmd/nncp-daemon/main.go
src/cmd/nncp-exec/main.go
src/cmd/nncp-file/main.go
src/cmd/nncp-freq/main.go
src/cmd/nncp-hash/main.go
src/cmd/nncp-log/main.go
src/cmd/nncp-pkt/main.go
src/cmd/nncp-reass/main.go
src/cmd/nncp-rm/main.go
src/cmd/nncp-stat/main.go
src/cmd/nncp-toss/main.go
src/cmd/nncp-trns/main.go
src/cmd/nncp-xfer/main.go
src/go.mod
src/go.sum
src/jobs.go
src/log.go
src/magic.go
src/nice.go
src/nncp.go
src/pkt.go
src/pkt_test.go
src/progress.go
src/toss.go
src/toss_test.go
src/tx.go
src/tx_test.go

index 7eb5e8a7c0b3afe6168c98c2d8d73c42d65e0d72..eec6a7e5fca4a8b32e163657ca0e483488f5e9f6 100644 (file)
@@ -12,11 +12,6 @@ Body is read from @code{stdin} into memory and compressed (unless
 execute specified @ref{CfgExec, handle} command with @option{ARG*}
 appended and decompressed body fed to command's @code{stdin}.
 
-If @option{-use-tmp} option is specified, then @code{stdin} data is read
-into temporary file first, requiring twice more disk space, but no
-memory requirements. @ref{StdinTmpFile, Same temporary file} rules
-applies as with @ref{nncp-file, nncp-file -} command.
-
 For example, if remote side has following configuration file for your
 node:
 
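
Editor's note on the hunk above: the remaining documentation describes how the remote side runs its configured @ref{CfgExec, handle} command with @option{ARG*} appended and the decompressed body on its @code{stdin}. The following Go sketch only illustrates that behaviour; it is not NNCP's actual tosser code, and the handle/argument values are hypothetical.

package main

import (
	"bytes"
	"log"
	"os/exec"
)

// runHandle mimics the documented behaviour: "handle" stands for the remote
// node's exec configuration, "args" are the ARG* given to nncp-exec, and
// "body" is the already decompressed payload fed to the command's stdin.
func runHandle(handle, args []string, body []byte) error {
	cmd := exec.Command(handle[0], append(handle[1:], args...)...)
	cmd.Stdin = bytes.NewReader(body)
	return cmd.Run()
}

func main() {
	// Hypothetical handle value, in the spirit of the sendmail example the
	// documentation continues with below.
	handle := []string{"/usr/sbin/sendmail"}
	body := []byte("Subject: hello\n\nbody text\n")
	if err := runHandle(handle, []string{"user@example.com"}, body); err != nil {
		log.Fatalln(err)
	}
}
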
index 24dafde0c554b486e35b44d52701a7226b9957fd..0a80a4c5996b20163e91257354f8d62d6082b2ba 100644 (file)
@@ -11,23 +11,11 @@ destination file name in remote's @ref{CfgIncoming, incoming}
 directory. If this file already exists there, then counter will be
 appended to it.
 
-This command queues file in @ref{Spool, spool} directory immediately
-(through the temporary file of course) -- so pay attention that sending
-2 GiB file will create 2 GiB outbound encrypted packet.
-
-@anchor{StdinTmpFile}
-If @file{SRC} equals to @file{-}, then create an encrypted temporary
-file and copy everything taken from @code{stdin} to it and use for outbound
-packet creation. Pay attention that if you want to send 1 GiB of data
-taken from @code{stdin}, then you have to have more than 2 GiB of disk space
-for that temporary file and resulting encrypted packet. You can control
-temporary file location directory with @env{$TMPDIR} environment
-variable. Encryption is performed in AEAD mode with
-@url{https://cr.yp.to/chacha.html, ChaCha20}-@url{https://en.wikipedia.org/wiki/Poly1305, Poly1305}
-algorithms. Data is divided on 128 KiB blocks. Each block is encrypted
-with increasing nonce counter. File is deletes immediately after
-creation, so even if program crashes -- disk space will be reclaimed, no
-need in cleaning it up later.
+This command queues the file in the @ref{Spool, spool} directory
+immediately -- so pay attention that sending a 2 GiB file will create a
+2 GiB outbound encrypted packet.
+
+If @file{SRC} equals to @file{-}, then data is read from @code{stdin}.
 
 If @file{SRC} points to directory, then
 @url{https://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_01, pax archive}
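
Editor's note on the hunk above: the documentation states that if the destination file already exists in the remote's incoming directory, a counter is appended to its name. The Go sketch below is a hypothetical illustration of such collision handling, not the actual nncp-toss implementation.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// uniqueName returns the destination path unchanged if it is free, otherwise
// appends an increasing numeric counter until an unused name is found.
// Hypothetical helper, shown only to illustrate the documented behaviour.
func uniqueName(incoming, name string) string {
	dst := filepath.Join(incoming, name)
	for i := 1; ; i++ {
		if _, err := os.Stat(dst); os.IsNotExist(err) {
			return dst
		}
		dst = filepath.Join(incoming, fmt.Sprintf("%s.%d", name, i))
	}
}

func main() {
	fmt.Println(uniqueName("/var/spool/nncp/incoming", "report.txt"))
}
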
index 802a6348ef86ad6dabb3a2bfa3dfcf8a34342e48..dde05c542433d49f15b8aba454e6f50593262ac5 100644 (file)
@@ -27,227 +27,399 @@ Tarballs include all necessary required libraries:
 @item @code{lukechampine.com/blake3} @tab MIT
 @end multitable
 
-@multitable {XXXXX} {XXXX-XX-XX} {XXXX KiB} {link sign} {xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx}
+@multitable {XXXXX} {XXXX-XX-XX} {XXXX KiB} {meta4 link sig} {xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx}
 @headitem Version @tab Date @tab Size @tab Tarball @tab SHA256 checksum
 
+@item @ref{Release 7_7_0, 7.7.0} @tab 2021-09-11 @tab 1180 KiB
+@tab
+    @url{download/nncp-7.7.0.tar.xz.meta4, meta4}
+    @url{download/nncp-7.7.0.tar.xz, link}
+    @url{download/nncp-7.7.0.tar.xz.sig, sig}
+@tab @code{A692A2FC 963CB0A4 5BFD5B7F 497A26D0 BD738630 4F9FA3CD 526DC69F CA3929EE}
+
 @item @ref{Release 7_6_0, 7.6.0} @tab 2021-08-08 @tab 1153 KiB
-@tab @url{download/nncp-7.6.0.tar.xz, link} @url{download/nncp-7.6.0.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-7.6.0.tar.xz.meta4, meta4}
+    @url{download/nncp-7.6.0.tar.xz, link}
+    @url{download/nncp-7.6.0.tar.xz.sig, sig}
 @tab @code{00852E80 70415154 197A5555 DDAE636E 6E3940EC DD53D39E A69E5FF1 531BA4C6}
 
 @item @ref{Release 7_5_1, 7.5.1} @tab 2021-08-05 @tab 1147 KiB
-@tab @url{download/nncp-7.5.1.tar.xz, link} @url{download/nncp-7.5.1.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-7.5.1.tar.xz.meta4, meta4}
+    @url{download/nncp-7.5.1.tar.xz, link}
+    @url{download/nncp-7.5.1.tar.xz.sig, sig}
 @tab @code{B093A745 C2EB9F5F E8341ED2 A6F1EE75 701B2646 B5701BAA F4E760D9 32CDD91A}
 
 @item @ref{Release 7_5_0, 7.5.0} @tab 2021-07-28 @tab 1151 KiB
-@tab @url{download/nncp-7.5.0.tar.xz, link} @url{download/nncp-7.5.0.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-7.5.0.tar.xz.meta4, meta4}
+    @url{download/nncp-7.5.0.tar.xz, link}
+    @url{download/nncp-7.5.0.tar.xz.sig, sig}
 @tab @code{14D92DC5 B8164EE4 4926D7AF 46DA9F23 0C8F6207 350CC747 6DB5CDFB 8E7C3FE4}
 
 @item @ref{Release 7_4_0, 7.4.0} @tab 2021-07-19 @tab 1153 KiB
-@tab @url{download/nncp-7.4.0.tar.xz, link} @url{download/nncp-7.4.0.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-7.4.0.tar.xz.meta4, meta4}
+    @url{download/nncp-7.4.0.tar.xz, link}
+    @url{download/nncp-7.4.0.tar.xz.sig, sig}
 @tab @code{F7499FBF B0658054 F2732722 D54FE31E A0F105FD 9970B5BB 6413A9CC 065CB0EB}
 
 @item @ref{Release 7_3_2, 7.3.2} @tab 2021-07-12 @tab 1141 KiB
-@tab @url{download/nncp-7.3.2.tar.xz, link} @url{download/nncp-7.3.2.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-7.3.2.tar.xz.meta4, meta4}
+    @url{download/nncp-7.3.2.tar.xz, link}
+    @url{download/nncp-7.3.2.tar.xz.sig, sig}
 @tab @code{65F6A230 04189D3F 307D160C AE97F99A 620DDA23 52821652 15DDC946 F6CC4B7F}
 
 @item @ref{Release 7_3_1, 7.3.1} @tab 2021-07-11 @tab 1142 KiB
-@tab @url{download/nncp-7.3.1.tar.xz, link} @url{download/nncp-7.3.1.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-7.3.1.tar.xz.meta4, meta4}
+    @url{download/nncp-7.3.1.tar.xz, link}
+    @url{download/nncp-7.3.1.tar.xz.sig, sig}
 @tab @code{8611DC6A 3EAC7FFA A6A1C688 2073AB4D A4E93D36 C864F050 C5F880FE 10FCFC46}
 
 @item @ref{Release 7_3_0, 7.3.0} @tab 2021-07-10 @tab 1141 KiB
-@tab @url{download/nncp-7.3.0.tar.xz, link} @url{download/nncp-7.3.0.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-7.3.0.tar.xz.meta4, meta4}
+    @url{download/nncp-7.3.0.tar.xz, link}
+    @url{download/nncp-7.3.0.tar.xz.sig, sig}
 @tab @code{CB34487A 6D7EF507 04D4B8F9 5A16EF16 CC841D3D 7F5423B1 EBB7979D 1062EB4E}
 
 @item @ref{Release 7_2_1, 7.2.1} @tab 2021-07-09 @tab 1139 KiB
-@tab @url{download/nncp-7.2.1.tar.xz, link} @url{download/nncp-7.2.1.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-7.2.1.tar.xz.meta4, meta4}
+    @url{download/nncp-7.2.1.tar.xz, link}
+    @url{download/nncp-7.2.1.tar.xz.sig, sig}
 @tab @code{6462BA44 7DB30234 DA6DFB4B B5BF890F 6CA2CC36 697B3AE7 E6F86B86 94AC97D6}
 
 @item @ref{Release 7_2_0, 7.2.0} @tab 2021-07-08 @tab 1136 KiB
-@tab @url{download/nncp-7.2.0.tar.xz, link} @url{download/nncp-7.2.0.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-7.2.0.tar.xz.meta4, meta4}
+    @url{download/nncp-7.2.0.tar.xz, link}
+    @url{download/nncp-7.2.0.tar.xz.sig, sig}
 @tab @code{70DBB97B 86C9B4B6 E35CFF02 B8C9FAE2 4323EEA5 C56403A2 66CBA268 D82F5077}
 
 @item @ref{Release 7_1_1, 7.1.1} @tab 2021-07-06 @tab 1132 KiB
-@tab @url{download/nncp-7.1.1.tar.xz, link} @url{download/nncp-7.1.1.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-7.1.1.tar.xz.meta4, meta4}
+    @url{download/nncp-7.1.1.tar.xz, link}
+    @url{download/nncp-7.1.1.tar.xz.sig, sig}
 @tab @code{B741C9E3 EC3DB342 893FE081 888C40E4 B94E4298 E5C1A8E0 BA4D179C C239CCCA}
 
 @item @ref{Release 7_1_0, 7.1.0} @tab 2021-07-04 @tab 1142 KiB
-@tab @url{download/nncp-7.1.0.tar.xz, link} @url{download/nncp-7.1.0.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-7.1.0.tar.xz.meta4, meta4}
+    @url{download/nncp-7.1.0.tar.xz, link}
+    @url{download/nncp-7.1.0.tar.xz.sig, sig}
 @tab @code{D3BC010F 5D86BB59 E07A2A84 2FF9C73B 4C2F780B 807EF25C E4BC477C E40764A6}
 
 @item @ref{Release 7_0_0, 7.0.0} @tab 2021-06-30 @tab 1123 KiB
-@tab @url{download/nncp-7.0.0.tar.xz, link} @url{download/nncp-7.0.0.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-7.0.0.tar.xz.meta4, meta4}
+    @url{download/nncp-7.0.0.tar.xz, link}
+    @url{download/nncp-7.0.0.tar.xz.sig, sig}
 @tab @code{D4D28E9A CF40FE12 68BDE134 9CD36076 282395BE 70094EFB 0DB75CE8 C32EA664}
 
 @item @ref{Release 6_6_0, 6.6.0} @tab 2021-06-26 @tab 1041 KiB
-@tab @url{download/nncp-6.6.0.tar.xz, link} @url{download/nncp-6.6.0.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-6.6.0.tar.xz.meta4, meta4}
+    @url{download/nncp-6.6.0.tar.xz, link}
+    @url{download/nncp-6.6.0.tar.xz.sig, sig}
 @tab @code{73DB666F A5C30282 770516B2 F39F1240 74117B45 A9F4B484 0361861A 183577F1}
 
 @item @ref{Release 6_5_0, 6.5.0} @tab 2021-05-30 @tab 1041 KiB
-@tab @url{download/nncp-6.5.0.tar.xz, link} @url{download/nncp-6.5.0.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-6.5.0.tar.xz.meta4, meta4}
+    @url{download/nncp-6.5.0.tar.xz, link}
+    @url{download/nncp-6.5.0.tar.xz.sig, sig}
 @tab @code{241D2AA7 27275CCF 86F06797 1AA8B3B8 D625C85C 4279DFDE 560216E3 38670B9A}
 
 @item @ref{Release 6_4_0, 6.4.0} @tab 2021-04-22 @tab 1042 KiB
-@tab @url{download/nncp-6.4.0.tar.xz, link} @url{download/nncp-6.4.0.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-6.4.0.tar.xz.meta4, meta4}
+    @url{download/nncp-6.4.0.tar.xz, link}
+    @url{download/nncp-6.4.0.tar.xz.sig, sig}
 @tab @code{3D0D1156 D69AF698 D402663C F84E51CC 3D40A50D 300E34D1 105A6F75 32E4B99B}
 
 @item @ref{Release 6_3_0, 6.3.0} @tab 2021-04-14 @tab 1042 KiB
-@tab @url{download/nncp-6.3.0.tar.xz, link} @url{download/nncp-6.3.0.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-6.3.0.tar.xz.meta4, meta4}
+    @url{download/nncp-6.3.0.tar.xz, link}
+    @url{download/nncp-6.3.0.tar.xz.sig, sig}
 @tab @code{76C26A11 E3423540 BB7B8470 820176A3 5FCD0493 B21A872E C223EB94 43BA466B}
 
 @item @ref{Release 6_2_1, 6.2.1} @tab 2021-03-26 @tab 1038 KiB
-@tab @url{download/nncp-6.2.1.tar.xz, link} @url{download/nncp-6.2.1.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-6.2.1.tar.xz.meta4, meta4}
+    @url{download/nncp-6.2.1.tar.xz, link}
+    @url{download/nncp-6.2.1.tar.xz.sig, sig}
 @tab @code{D9682D95 4D68025A F5B07516 258D9FFC DA29A4D7 E7E1635B E0C219A1 C5DDB067}
 
 @item @ref{Release 6_2_0, 6.2.0} @tab 2021-03-07 @tab 1038 KiB
-@tab @url{download/nncp-6.2.0.tar.xz, link} @url{download/nncp-6.2.0.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-6.2.0.tar.xz.meta4, meta4}
+    @url{download/nncp-6.2.0.tar.xz, link}
+    @url{download/nncp-6.2.0.tar.xz.sig, sig}
 @tab @code{272CEDED 69FFF3B3 78767297 3199481A C610B753 BB82C22E ECEC45FC 05DA40FE}
 
 @item @ref{Release 6_1_0, 6.1.0} @tab 2021-02-24 @tab 1040 KiB
-@tab @url{download/nncp-6.1.0.tar.xz, link} @url{download/nncp-6.1.0.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-6.1.0.tar.xz.meta4, meta4}
+    @url{download/nncp-6.1.0.tar.xz, link}
+    @url{download/nncp-6.1.0.tar.xz.sig, sig}
 @tab @code{083A533F 7D021206 9AE07F9F D6CD22E3 C5BE09E8 30F2C9C4 97D97CF6 14E5413F}
 
 @item @ref{Release 6_0_0, 6.0.0} @tab 2021-01-23 @tab 1028 KiB
-@tab @url{download/nncp-6.0.0.tar.xz, link} @url{download/nncp-6.0.0.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-6.0.0.tar.xz.meta4, meta4}
+    @url{download/nncp-6.0.0.tar.xz, link}
+    @url{download/nncp-6.0.0.tar.xz.sig, sig}
 @tab @code{42FE8AA5 4520B3A1 ABB50D66 1BBBA6A1 41CE4E74 9B4816B0 D4C6845D 67465916}
 
 @item @ref{Release 5_6_0, 5.6.0} @tab 2021-01-17 @tab 1024 KiB
-@tab @url{download/nncp-5.6.0.tar.xz, link} @url{download/nncp-5.6.0.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-5.6.0.tar.xz.meta4, meta4}
+    @url{download/nncp-5.6.0.tar.xz, link}
+    @url{download/nncp-5.6.0.tar.xz.sig, sig}
 @tab @code{1DC83F05 F14A3C3B 95820046 C60B170E B8C8936F 142A5B9A 1E943E6F 4CEFBDE3}
 
 @item @ref{Release 5_5_1, 5.5.1} @tab 2021-01-11 @tab 1165 KiB
-@tab @url{download/nncp-5.5.1.tar.xz, link} @url{download/nncp-5.5.1.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-5.5.1.tar.xz.meta4, meta4}
+    @url{download/nncp-5.5.1.tar.xz, link}
+    @url{download/nncp-5.5.1.tar.xz.sig, sig}
 @tab @code{E7DEED7A D3BA696C F64359C0 DC0A93AD 109950C5 6660D028 5FD7BB57 120C9CF7}
 
 @item @ref{Release 5_5_0, 5.5.0} @tab 2021-01-07 @tab 1161 KiB
-@tab @url{download/nncp-5.5.0.tar.xz, link} @url{download/nncp-5.5.0.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-5.5.0.tar.xz.meta4, meta4}
+    @url{download/nncp-5.5.0.tar.xz, link}
+    @url{download/nncp-5.5.0.tar.xz.sig, sig}
 @tab @code{EF0CBEE1 520BE97D A210794C 172BF444 E6F75DB2 84F5BD05 66919193 326AED77}
 
 @item @ref{Release 5_4_1, 5.4.1} @tab 2020-09-28 @tab 1143 KiB
-@tab @url{download/nncp-5.4.1.tar.xz, link} @url{download/nncp-5.4.1.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-5.4.1.tar.xz.meta4, meta4}
+    @url{download/nncp-5.4.1.tar.xz, link}
+    @url{download/nncp-5.4.1.tar.xz.sig, sig}
 @tab @code{A02D0C9B 51533DF8 115C17E1 02F8C485 9F7B805A 64290CDF 79151BA9 E627FA63}
 
 @item @ref{Release 5_3_3, 5.3.3} @tab 2020-01-23 @tab 1116 KiB
-@tab @url{download/nncp-5.3.3.tar.xz, link} @url{download/nncp-5.3.3.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-5.3.3.tar.xz.meta4, meta4}
+    @url{download/nncp-5.3.3.tar.xz, link}
+    @url{download/nncp-5.3.3.tar.xz.sig, sig}
 @tab @code{707CD852 4E424C24 BCB22D6B 4BC81709 71C42A5F E0062B93 A8D1DD9D 7FB365D0}
 
 @item @ref{Release 5_3_2, 5.3.2} @tab 2019-12-28 @tab 1118 KiB
-@tab @url{download/nncp-5.3.2.tar.xz, link} @url{download/nncp-5.3.2.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-5.3.2.tar.xz.meta4, meta4}
+    @url{download/nncp-5.3.2.tar.xz, link}
+    @url{download/nncp-5.3.2.tar.xz.sig, sig}
 @tab @code{6E2D1B3C CA0DD462 A6F5F8DE 5CB8DE15 C3D33C74 238A2C52 373C7BD6 A126A834}
 
 @item @ref{Release 5_3_1, 5.3.1} @tab 2019-12-25 @tab 1117 KiB
-@tab @url{download/nncp-5.3.1.tar.xz, link} @url{download/nncp-5.3.1.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-5.3.1.tar.xz.meta4, meta4}
+    @url{download/nncp-5.3.1.tar.xz, link}
+    @url{download/nncp-5.3.1.tar.xz.sig, sig}
 @tab @code{23A52819 F0395A6A E05E4176 017DCA3C 4A20A023 EEADA6A3 3168E58D BEE34A5B}
 
 @item @ref{Release 5_3_0, 5.3.0} @tab 2019-12-22 @tab 1112 KiB
-@tab @url{download/nncp-5.3.0.tar.xz, link} @url{download/nncp-5.3.0.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-5.3.0.tar.xz.meta4, meta4}
+    @url{download/nncp-5.3.0.tar.xz, link}
+    @url{download/nncp-5.3.0.tar.xz.sig, sig}
 @tab @code{9F093115 506D00E7 2E41ACD6 3F283172 8430E1C2 8BA4A941 FFA3C65D 89AD4ED0}
 
 @item @ref{Release 5_2_1, 5.2.1} @tab 2019-12-15 @tab 1109 KiB
-@tab @url{download/nncp-5.2.1.tar.xz, link} @url{download/nncp-5.2.1.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-5.2.1.tar.xz.meta4, meta4}
+    @url{download/nncp-5.2.1.tar.xz, link}
+    @url{download/nncp-5.2.1.tar.xz.sig, sig}
 @tab @code{983D1A8A 4398C281 76356AE1 C5541124 B0755555 D115063B D1388F85 9C4A6B3E}
 
 @item @ref{Release 5_2_0, 5.2.0} @tab 2019-12-14 @tab 1109 KiB
-@tab @url{download/nncp-5.2.0.tar.xz, link} @url{download/nncp-5.2.0.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-5.2.0.tar.xz.meta4, meta4}
+    @url{download/nncp-5.2.0.tar.xz, link}
+    @url{download/nncp-5.2.0.tar.xz.sig, sig}
 @tab @code{FFC55467 8B4ECCA6 92D90F42 ACC0286D 209E054E EA1CBF87 0307003E CF219610}
 
 @item @ref{Release 5_1_2, 5.1.2} @tab 2019-12-13 @tab 1106 KiB
-@tab @url{download/nncp-5.1.2.tar.xz, link} @url{download/nncp-5.1.2.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-5.1.2.tar.xz.meta4, meta4}
+    @url{download/nncp-5.1.2.tar.xz, link}
+    @url{download/nncp-5.1.2.tar.xz.sig, sig}
 @tab @code{52B2043B 1B22D20F C44698EC AFE5FF46 F99B4DD5 2C392D4D 25FE1580 993263B3}
 
 @item @ref{Release 5_1_1, 5.1.1} @tab 2019-12-01 @tab 1103 KiB
-@tab @url{download/nncp-5.1.1.tar.xz, link} @url{download/nncp-5.1.1.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-5.1.1.tar.xz.meta4, meta4}
+    @url{download/nncp-5.1.1.tar.xz, link}
+    @url{download/nncp-5.1.1.tar.xz.sig, sig}
 @tab @code{B9537678 E5B549BA 6FA0D20D 41B2D4A9 4ED31F2C AB9FAF63 A388D95E 7662A93F}
 
 @item @ref{Release 5_1_0, 5.1.0} @tab 2019-11-24 @tab 1103 KiB
-@tab @url{download/nncp-5.1.0.tar.xz, link} @url{download/nncp-5.1.0.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-5.1.0.tar.xz.meta4, meta4}
+    @url{download/nncp-5.1.0.tar.xz, link}
+    @url{download/nncp-5.1.0.tar.xz.sig, sig}
 @tab @code{6F5B74EC 952EAFEC 2A787463 CE1E808E CC990F03 D46F28E9 A89BAB55 5A2C2214}
 
 @item @ref{Release 5_0_0, 5.0.0} @tab 2019-11-15 @tab 1099 KiB
-@tab @url{download/nncp-5.0.0.tar.xz, link} @url{download/nncp-5.0.0.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-5.0.0.tar.xz.meta4, meta4}
+    @url{download/nncp-5.0.0.tar.xz, link}
+    @url{download/nncp-5.0.0.tar.xz.sig, sig}
 @tab @code{3696D7EE B0783E91 87E5EEF4 EFC35235 10452353 7C51FA4C 9BD3CBEE A22678B3}
 
 @item @ref{Release 4_1, 4.1} @tab 2019-05-01 @tab 1227 KiB
-@tab @url{download/nncp-4.1.tar.xz, link} @url{download/nncp-4.1.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-4.1.tar.xz.meta4, meta4}
+    @url{download/nncp-4.1.tar.xz, link}
+    @url{download/nncp-4.1.tar.xz.sig, sig}
 @tab @code{29AEC53D EC914906 D7C47194 0955A32E 2BF470E6 9B8E09D3 AF3B62D8 CC8E541E}
 
 @item @ref{Release 4_0, 4.0} @tab 2019-04-28 @tab 1227 KiB
-@tab @url{download/nncp-4.0.tar.xz, link} @url{download/nncp-4.0.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-4.0.tar.xz.meta4, meta4}
+    @url{download/nncp-4.0.tar.xz, link}
+    @url{download/nncp-4.0.tar.xz.sig, sig}
 @tab @code{EAFA6272 22E355FC EB772A90 FC6DEA8E AE1F1695 3F48A4A3 57ADA0B4 FF918452}
 
 @item @ref{Release 3_4, 3.4} @tab 2018-06-10 @tab 1154 KiB
-@tab @url{download/nncp-3.4.tar.xz, link} @url{download/nncp-3.4.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-3.4.tar.xz.meta4, meta4}
+    @url{download/nncp-3.4.tar.xz, link}
+    @url{download/nncp-3.4.tar.xz.sig, sig}
 @tab @code{9796C4CB 7B670FC7 5FEED3CD 467CA556 B230387D 935B09BB 4B19FD57 FD17FFBA}
 
 @item @ref{Release 3_3, 3.3} @tab 2018-06-02 @tab 1152 KiB
-@tab @url{download/nncp-3.3.tar.xz, link} @url{download/nncp-3.3.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-3.3.tar.xz.meta4, meta4}
+    @url{download/nncp-3.3.tar.xz, link}
+    @url{download/nncp-3.3.tar.xz.sig, sig}
 @tab @code{1F8FA9B4 6125D8A9 0608298B A1ED87E1 12DB2D8B 81C766DE F4DFE191 C7B1BFC2}
 
 @item @ref{Release 3_2, 3.2} @tab 2018-05-27 @tab 1147 KiB
-@tab @url{download/nncp-3.2.tar.xz, link} @url{download/nncp-3.2.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-3.2.tar.xz.meta4, meta4}
+    @url{download/nncp-3.2.tar.xz, link}
+    @url{download/nncp-3.2.tar.xz.sig, sig}
 @tab @code{BE76802F 1E273D1D E91F0648 A7CB23C5 989F5390 A36F2D0C FD873046 51B9141E}
 
 @item @ref{Release 3_1, 3.1} @tab 2018-02-18 @tab 1145 KiB
-@tab @url{download/nncp-3.1.tar.xz, link} @url{download/nncp-3.1.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-3.1.tar.xz.meta4, meta4}
+    @url{download/nncp-3.1.tar.xz, link}
+    @url{download/nncp-3.1.tar.xz.sig, sig}
 @tab @code{B9344516 4230B58E 8AAADAA2 066F37F2 493CCB71 B025126B BCAD8FAD 6535149F}
 
 @item @ref{Release 3_0, 3.0} @tab 2017-12-30 @tab 993 KiB
-@tab @url{download/nncp-3.0.tar.xz, link} @url{download/nncp-3.0.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-3.0.tar.xz.meta4, meta4}
+    @url{download/nncp-3.0.tar.xz, link}
+    @url{download/nncp-3.0.tar.xz.sig, sig}
 @tab @code{248B2257 2F576E79 A19672E9 B82EB649 18FC95A9 194408C0 67EA4DD3 0468286D}
 
 @item @ref{Release 2_0, 2.0} @tab 2017-12-02 @tab 986 KiB
-@tab @url{download/nncp-2.0.tar.xz, link} @url{download/nncp-2.0.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-2.0.tar.xz.meta4, meta4}
+    @url{download/nncp-2.0.tar.xz, link}
+    @url{download/nncp-2.0.tar.xz.sig, sig}
 @tab @code{BEF31B13 FB25381E A511FB77 067798AB 27409238 BDF5600F E2EADB29 E5E78996}
 
 @item @ref{Release 1_0, 1.0} @tab 2017-12-02 @tab 987 KiB
-@tab @url{download/nncp-1.0.tar.xz, link} @url{download/nncp-1.0.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-1.0.tar.xz.meta4, meta4}
+    @url{download/nncp-1.0.tar.xz, link}
+    @url{download/nncp-1.0.tar.xz.sig, sig}
 @tab @code{68BF7803 CD25F59A 56D9FD6C 695002B5 BFBAF591 8A6583F4 3139FC28 CA1AB4AF}
 
 @item @ref{Release 0_12, 0.12} @tab 2017-10-08 @tab 978 KiB
-@tab @url{download/nncp-0.12.tar.xz, link} @url{download/nncp-0.12.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-0.12.tar.xz.meta4, meta4}
+    @url{download/nncp-0.12.tar.xz, link}
+    @url{download/nncp-0.12.tar.xz.sig, sig}
 @tab @code{707B4005 97753B29 73A5F3E5 DAB51B92 21CC296D 690EF4BC ADE93E0D 2595A5F2}
 
 @item @ref{Release 0_11, 0.11} @tab 2017-08-21 @tab 1031 KiB
-@tab @url{download/nncp-0.11.tar.xz, link} @url{download/nncp-0.11.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-0.11.tar.xz.meta4, meta4}
+    @url{download/nncp-0.11.tar.xz, link}
+    @url{download/nncp-0.11.tar.xz.sig, sig}
 @tab @code{D0F73C3B ADBF6B8B 13641A61 4D34F65F 20AF4C84 90894331 BF1F1609 2D65E719}
 
 @item @ref{Release 0_10, 0.10} @tab 2017-07-04 @tab 949 KiB
-@tab @url{download/nncp-0.10.tar.xz, link} @url{download/nncp-0.10.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-0.10.tar.xz.meta4, meta4}
+    @url{download/nncp-0.10.tar.xz, link}
+    @url{download/nncp-0.10.tar.xz.sig, sig}
 @tab @code{DCE7C762 2F9281EB 282F1A67 5CA6500E 854F2DEC D60F3264 07872B91 4F4E6FA0}
 
 @item @ref{Release 0_9, 0.9} @tab 2017-05-17 @tab 942 KiB
-@tab @url{download/nncp-0.9.tar.xz, link} @url{download/nncp-0.9.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-0.9.tar.xz.meta4, meta4}
+    @url{download/nncp-0.9.tar.xz, link}
+    @url{download/nncp-0.9.tar.xz.sig, sig}
 @tab @code{8D0765A5 F9D81086 7E1F5AB4 52A9464D C5035CCB 4E09A29A 9C9A4934 1A72AB2C}
 
 @item @ref{Release 0_8, 0.8} @tab 2017-04-30 @tab 932 KiB
-@tab @url{download/nncp-0.8.tar.xz, link} @url{download/nncp-0.8.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-0.8.tar.xz.meta4, meta4}
+    @url{download/nncp-0.8.tar.xz, link}
+    @url{download/nncp-0.8.tar.xz.sig, sig}
 @tab @code{9BD607D5 C5551857 B7E9277D 0E857936 1DB7353A E0F1556E EA9B1D91 8305B184}
 
 @item @ref{Release 0_7, 0.7} @tab 2017-04-02 @tab 783 KiB
-@tab @url{download/nncp-0.7.tar.xz, link} @url{download/nncp-0.7.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-0.7.tar.xz.meta4, meta4}
+    @url{download/nncp-0.7.tar.xz, link}
+    @url{download/nncp-0.7.tar.xz.sig, sig}
 @tab @code{D3407323 F89296DD 743FA764 51964B43 794E61BE 0E1D2DD4 ABD02042 B94FFC4F}
 
 @item @ref{Release 0_6, 0.6} @tab 2017-02-05 @tab 746 KiB
-@tab @url{download/nncp-0.6.tar.xz, link} @url{download/nncp-0.6.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-0.6.tar.xz.meta4, meta4}
+    @url{download/nncp-0.6.tar.xz, link}
+    @url{download/nncp-0.6.tar.xz.sig, sig}
 @tab @code{DCFEE3F9 F669AC28 563C50DB 67BB8B43 0CFF4AB6 EC770ACE B5378D0B B40C0656}
 
 @item @ref{Release 0_5, 0.5} @tab 2017-01-19 @tab 743 KiB
-@tab @url{download/nncp-0.5.tar.xz, link} @url{download/nncp-0.5.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-0.5.tar.xz.meta4, meta4}
+    @url{download/nncp-0.5.tar.xz, link}
+    @url{download/nncp-0.5.tar.xz.sig, sig}
 @tab @code{D98F9149 5A6D6726 4C659640 1AD7F400 271A58CE 5D8D4AC5 5D1CF934 59BEDFA6}
 
 @item @ref{Release 0_4, 0.4} @tab 2017-01-17 @tab 741 KiB
-@tab @url{download/nncp-0.4.tar.xz, link} @url{download/nncp-0.4.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-0.4.tar.xz.meta4, meta4}
+    @url{download/nncp-0.4.tar.xz, link}
+    @url{download/nncp-0.4.tar.xz.sig, sig}
 @tab @code{93577327 B3DEBFE3 A80BEB0D 8325B2E6 0939EC55 4DBB05F3 4CA34B99 229C3722}
 
 @item @ref{Release 0_3, 0.3} @tab 2017-01-17 @tab 741 KiB
-@tab @url{download/nncp-0.3.tar.xz, link} @url{download/nncp-0.3.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-0.3.tar.xz.meta4, meta4}
+    @url{download/nncp-0.3.tar.xz, link}
+    @url{download/nncp-0.3.tar.xz.sig, sig}
 @tab @code{6E76EC5E 6B575C65 BF2D6388 870F2A1C 417D63E4 1628CAA1 BB499D0D 0634473B}
 
 @item @ref{Release 0_2, 0.2} @tab 2017-01-17 @tab 740 KiB
-@tab @url{download/nncp-0.2.tar.xz, link} @url{download/nncp-0.2.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-0.2.tar.xz.meta4, meta4}
+    @url{download/nncp-0.2.tar.xz, link}
+    @url{download/nncp-0.2.tar.xz.sig, sig}
 @tab @code{00BEAC5A 0C4083B0 42E3152B ACA6FF20 12768B82 CE24D716 8E04279C ECE14DB7}
 
 @item 0.1 @tab 2017-01-10 @tab 720 KiB
-@tab @url{download/nncp-0.1.tar.xz, link} @url{download/nncp-0.1.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-0.1.tar.xz.meta4, meta4}
+    @url{download/nncp-0.1.tar.xz, link}
+    @url{download/nncp-0.1.tar.xz.sig, sig}
 @tab @code{8F71D65B 70865EBF FE802CDF A5C14D00 A9FD6559 FD722E60 5D97E82C 5E2412C2}
 
 @end multitable
index 9d96e7dc05ec4147c5236b09c51017db4f5300b8..9f54d77658646b5cb46f2bdd22309fa4cdf46e3d 100644 (file)
@@ -8,7 +8,7 @@ Possibly NNCP package already exists for your distribution:
 @item @url{https://github.com/DragonFlyBSD/DPorts/tree/master/net/nncp, DragonFly BSD ports}
 @item @url{https://github.com/NixOS/nixpkgs/tree/master/pkgs/tools/misc/nncp, NixOS packages}
 @item @url{https://github.com/void-linux/void-packages/blob/master/srcpkgs/nncp/template, Void Linux}
-@item @url{https://qa.debian.org/developer.php?login=jgoerzen@@complete.org, Debian packages} (pending inclusion, maintainer's page)
+@item @url{https://tracker.debian.org/pkg/nncp, Debian packages}
 @end itemize
 
 NNCP should run on any POSIX-compatible operating system.
index de7cc92ade11f0092bb2d02691d27d653218f3d3..361967ce38e034c20e0a7747247dcf95ab225969 100644 (file)
@@ -55,28 +55,18 @@ There is a standard for creating
 output format.
 
 @example
-$ wget \
-    --warc-file www.example_com-$(date '+%Y%M%d%H%m%S') \
-    --no-warc-compression \
-    --no-warc-keep-log [@dots{}] \
-    http://www.example.com/
+$ wget [--page-requisites] [--recursive] \
+    --warc-file www.example.com-$(date '+%Y%m%d%H%M%S') \
+    --no-warc-keep-log --no-warc-digests \
+    [--no-warc-compression] [--warc-max-size=XXX] \
+    [@dots{}] http://www.example.com/
 @end example
 
-That command will create uncompressed @file{www.example_com-XXX.warc}
-web archive. By default, WARCs are compressed using
-@url{https://en.wikipedia.org/wiki/Gzip, gzip}, but, in example above,
-we have disabled it to compress with stronger and faster
-@url{https://en.wikipedia.org/wiki/Zstd, zstd}, before sending via
-@command{nncp-file}.
-
-There are plenty of software acting like HTTP proxy for your browser,
-allowing to view that WARC files. However you can extract files from
-that archive using @url{https://pypi.python.org/pypi/Warcat, warcat}
-utility, producing usual directory hierarchy:
-
-@example
-$ python3 -m warcat extract \
-    www.example_com-XXX.warc \
-    --output-dir www.example.com-XXX \
-    --progress
-@end example
+That command will create a @file{www.example.com-XXX.warc} web archive.
+It can also produce specialized segmented
+@url{https://en.wikipedia.org/wiki/Gzip, gzip} and
+@url{https://en.wikipedia.org/wiki/Zstandard, Zstandard}
+compressed archives that are friendly to indexing and searching. I can
+recommend my own
+@url{http://www.tofuproxy.stargrave.org/WARCs.html, tofuproxy} software
+(also written in Go) for conveniently indexing, browsing and extracting
+those archives.
index 210a44016bc343f1c4dc0703dd8591a8452c23c0..697d0a148d4df730905dc67e0bf994113f60636c 100644 (file)
@@ -1,6 +1,36 @@
 @node Новости
 @section Новости
 
+@node Релиз 8.0.0
+@subsection Релиз 8.0.0
+@itemize
+
+@item
+@strong{Несовместимое} изменение формата зашифрованных пакетов: размеры
+полезной нагрузки и дополнения посылаются прямо внутри зашифрованного
+потока. Это даёт возможность потоково создавать шифрованные пакеты, без
+знания размеров заранее, без создания временного файла или буферизации в
+памяти.
+
+@item
+Производится корректная проверка дополнения зашифрованного пакета. Это
+не критичная проблема, но прежде ни размер, ни значение дополнения не
+были аутентифицированы, позволяя циклично откусывать по байту с конца и
+узнавать размер полезной нагрузки, наблюдая за реакцией по обработке
+такого зашифрованного пакета.
+
+@item
+@command{nncp-exec} больше не имеет @option{-use-tmp} опции, из-за
+совместимого с потоковой работой формата зашифрованных пакетов.
+
+@item
+У @command{nncp-file} и @command{nncp-exec} команд появилась опция
+@option{-maxsize}, ограничивающая максимальный результирующий размер
+зашифрованного пакета (возвращая ошибку если он превышен). Может быть
+полезно, так как размер полезной нагрузки может быть неизвестен заранее.
+
+@end itemize
+
 @node Релиз 7.7.0
 @subsection Релиз 7.7.0
 @itemize
index ad825f10b7be2e4a5e5504f2627f39da1e3be1dc..1cf64c543a6552bc9bfb9ffebfa3d3d8a600d518 100644 (file)
@@ -3,6 +3,35 @@
 
 See also this page @ref{Новости, on russian}.
 
+@node Release 8_0_0
+@section Release 8.0.0
+@itemize
+
+@item
+@strong{Incompatible} encrypted packet format change: payload and pad
+sizes are now sent inside the encrypted stream itself. That makes it
+possible to create encrypted packets in a streaming way, without
+knowing the sizes in advance and without creating a temporary file or
+buffering data in memory.
+
+@item
+Encrypted packet padding is now properly verified. This is not a
+critical issue, but previously neither the padding value nor its size
+was authenticated, making it possible to iteratively strip trailing
+bytes and determine the payload's size by observing the reaction of the
+encrypted packet processing.
+
+@item
+@command{nncp-exec} loses its @option{-use-tmp} option, thanks to the
+streaming-compatible encrypted packet format.
+
+@item
+The @command{nncp-file} and @command{nncp-exec} commands gain a
+@option{-maxsize} option, limiting the maximal size of the resulting
+encrypted packet (an error is returned if it is exceeded). It can be
+useful, because the payload size may not be known in advance.
+
+@end itemize
+
 @node Release 7_7_0
 @section Release 7.7.0
 @itemize
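
Editor's note on the release notes above: the first item corresponds to the new PktSize structure added to src/pkt.go further down in this commit. The Go sketch below only illustrates the idea of XDR-encoding such a size header so it can travel inside the encrypted stream; the function name and exact wire layout here are assumptions, not the real v8 format.

package main

import (
	"bytes"
	"fmt"
	"log"

	xdr "github.com/davecgh/go-xdr/xdr2"
)

// PktSize mirrors the structure introduced in src/pkt.go by this commit.
type PktSize struct {
	Payload uint64
	Pad     uint64
}

// encodeSizeHeader XDR-marshals the payload/pad sizes. In the described v8
// format such a blob is encrypted and authenticated inside the stream itself,
// so the sender does not need to know the sizes before encryption starts.
func encodeSizeHeader(payload, pad uint64) ([]byte, error) {
	var buf bytes.Buffer
	if _, err := xdr.Marshal(&buf, PktSize{Payload: payload, Pad: pad}); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

func main() {
	hdr, err := encodeSizeHeader(123456, 789)
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Printf("size header is %d bytes long\n", len(hdr)) // 16: two XDR uint64s
}
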
index f934a166886f131a467e255b8101b46928c4132c..ab04f9ea3e3f08884e44dd5ab14a46c3a073d3a2 100755 (executable)
@@ -113,10 +113,15 @@ chmod +x contrib/do
 cd ..
 tar cvf nncp-"$release".tar --uid=0 --gid=0 --numeric-owner nncp-"$release"
 xz -9v nncp-"$release".tar
-gpg --detach-sign --sign --local-user releases@nncpgo.org nncp-"$release".tar.xz
-mv -v $tmp/nncp-"$release".tar.xz $tmp/nncp-"$release".tar.xz.sig $cur/doc/download
-
 tarball=$cur/doc/download/nncp-"$release".tar.xz
+gpg --detach-sign --sign --local-user releases@nncpgo.org "$tarball"
+gpg --enarmor < "$tarball".sig |
+    sed "/^Comment:/d ; s/ARMORED FILE/SIGNATURE/" > "$tarball".asc
+meta4-create -file "$tarball" -mtime "$tarball" -sig "$tarball".asc \
+    http://www.nncpgo.org/download/"$tarball" \
+    https://nncp.mirrors.quux.org/download/"$tarball" > "$tarball".meta4
+mv -v $tmp/"$tarball" $tmp/"$tarball".sig $tmp/"$tarball".meta4 $cur/doc/download
+
 size=$(( $(stat -f %z $tarball) / 1024 ))
 hash=$(gpg --print-md SHA256 < $tarball)
 release_date=$(date "+%Y-%m-%d")
@@ -125,7 +130,10 @@ release_underscored=`echo $release | tr . _`
 cat <<EOF
 An entry for documentation:
 @item @ref{Release $release_underscored, $release} @tab $release_date @tab $size KiB
-@tab @url{download/nncp-${release}.tar.xz, link} @url{download/nncp-${release}.tar.xz.sig, sign}
+@tab
+    @url{download/nncp-${release}.tar.xz.meta4, meta4}
+    @url{download/nncp-${release}.tar.xz, link}
+    @url{download/nncp-${release}.tar.xz.sig, sig}
 @tab @code{$hash}
 EOF
 
index 52ce4f61fe0eb1016d8effa4118d68718033eff4..def4f86d01250325aae5e88babd591a92a7df90b 100644 (file)
@@ -34,7 +34,7 @@ import (
 
        xdr "github.com/davecgh/go-xdr/xdr2"
        "github.com/dustin/go-humanize"
-       "go.cypherpunks.ru/nncp/v7"
+       "go.cypherpunks.ru/nncp/v8"
 )
 
 const (
index 03a5a1a5c171a760be52a34631671bb7c635bd5d..0ac016b4cdb99b8f6280d93f6dc45ce719bbb718 100644 (file)
@@ -26,7 +26,7 @@ import (
        "strings"
        "time"
 
-       "go.cypherpunks.ru/nncp/v7"
+       "go.cypherpunks.ru/nncp/v8"
 )
 
 func usage() {
index 4024b5be4e07afe34c6f31adcf228492ed1b28ba..0e5de53b3c4953eeee2b08151cc7ef4ebca948f8 100644 (file)
@@ -27,7 +27,7 @@ import (
        "sync"
        "time"
 
-       "go.cypherpunks.ru/nncp/v7"
+       "go.cypherpunks.ru/nncp/v8"
 )
 
 func usage() {
index cbc8f8f0bca083f0c8caa98ae9d960febdd4ac6e..7e224e2b1a0896e02e59bff9b87e4944026c851e 100644 (file)
@@ -26,7 +26,7 @@ import (
        "os"
 
        "github.com/hjson/hjson-go"
-       "go.cypherpunks.ru/nncp/v7"
+       "go.cypherpunks.ru/nncp/v8"
 )
 
 func usage() {
index 1516d23f031b5fe7d421f561775d86abd2f435a9..465963d7fefc2e2c47ae6eeb88b369fbab22ae6a 100644 (file)
@@ -28,7 +28,7 @@ import (
        "os"
 
        xdr "github.com/davecgh/go-xdr/xdr2"
-       "go.cypherpunks.ru/nncp/v7"
+       "go.cypherpunks.ru/nncp/v8"
        "golang.org/x/crypto/blake2b"
        "golang.org/x/term"
 )
index 4185302349bd3707e31da162794a727843724972..f0ddb940dda1c7a8e2c0dd75ce98c6aaa19865fc 100644 (file)
@@ -25,7 +25,7 @@ import (
        "os"
 
        "github.com/hjson/hjson-go"
-       "go.cypherpunks.ru/nncp/v7"
+       "go.cypherpunks.ru/nncp/v8"
 )
 
 func usage() {
index 56e4e4f40a7a9a886587e37a41476f0a568eb6b7..0ec00e91b2d0c65753e2e3fa9421814c02781316 100644 (file)
@@ -30,7 +30,7 @@ import (
        "golang.org/x/crypto/blake2b"
        "golang.org/x/crypto/nacl/box"
 
-       "go.cypherpunks.ru/nncp/v7"
+       "go.cypherpunks.ru/nncp/v8"
 )
 
 func usage() {
index b2a931c977f665bdb8a5d90ab2901cdca6b98f98..721a54f4a02a12c13002d953031a3fd1e318e797 100644 (file)
@@ -26,7 +26,7 @@ import (
        "path/filepath"
        "time"
 
-       "go.cypherpunks.ru/nncp/v7"
+       "go.cypherpunks.ru/nncp/v8"
 )
 
 func usage() {
index a74fb0e296fd10c1b0a38869d9f5ddcd11411eba..657cd146d05356c6d8436cc402e0d11f48ff7157 100644 (file)
@@ -27,7 +27,7 @@ import (
        "time"
 
        "github.com/gorhill/cronexpr"
-       "go.cypherpunks.ru/nncp/v7"
+       "go.cypherpunks.ru/nncp/v8"
 )
 
 func usage() {
index 4e9231d942671bd15418f28b69207ae91344fe81..2503ab228def05e48d471d302342bf467ada5534 100644 (file)
@@ -29,7 +29,7 @@ import (
        "time"
 
        "github.com/dustin/go-humanize"
-       "go.cypherpunks.ru/nncp/v7"
+       "go.cypherpunks.ru/nncp/v8"
        "golang.org/x/net/netutil"
 )
 
index 11a99073bb441ab06642ae27f1ebb72babe555d6..7747f3fcf1aa478b96ee3b605c8baa7950892baa 100644 (file)
@@ -26,7 +26,7 @@ import (
        "os"
        "strings"
 
-       "go.cypherpunks.ru/nncp/v7"
+       "go.cypherpunks.ru/nncp/v8"
 )
 
 func usage() {
@@ -40,12 +40,12 @@ func usage() {
 
 func main() {
        var (
-               useTmp       = flag.Bool("use-tmp", false, "Use temporary file, instead of memory buffer")
                noCompress   = flag.Bool("nocompress", false, "Do not compress input data")
                cfgPath      = flag.String("cfg", nncp.DefaultCfgPath, "Path to configuration file")
                niceRaw      = flag.String("nice", nncp.NicenessFmt(nncp.DefaultNiceExec), "Outbound packet niceness")
                replyNiceRaw = flag.String("replynice", nncp.NicenessFmt(nncp.DefaultNiceFile), "Possible reply packet niceness")
                minSize      = flag.Uint64("minsize", 0, "Minimal required resulting packet size, in KiB")
+               argMaxSize   = flag.Uint64("maxsize", 0, "Maximal allowable resulting packet size, in KiB")
                viaOverride  = flag.String("via", "", "Override Via path to destination node")
                spoolPath    = flag.String("spool", "", "Override path to spool")
                logPath      = flag.String("log", "", "Override path to logfile")
@@ -111,6 +111,11 @@ func main() {
                }
        }
 
+       maxSize := int64(nncp.MaxFileSize)
+       if *argMaxSize > 0 {
+               maxSize = int64(*argMaxSize) * 1024
+       }
+
        nncp.ViaOverride(*viaOverride, ctx, node)
        ctx.Umask()
 
@@ -122,7 +127,7 @@ func main() {
                flag.Args()[2:],
                bufio.NewReader(os.Stdin),
                int64(*minSize)*1024,
-               *useTmp,
+               maxSize,
                *noCompress,
                areaId,
        ); err != nil {
index 6870aec808c818b7c1dfb9aedcf94a2a3fc81e67..1d72a9dc0047440a7fc42db494e673b802a977b6 100644 (file)
@@ -25,7 +25,7 @@ import (
        "os"
        "strings"
 
-       "go.cypherpunks.ru/nncp/v7"
+       "go.cypherpunks.ru/nncp/v8"
 )
 
 func usage() {
@@ -36,7 +36,8 @@ func usage() {
                os.Args[0], nncp.AreaDir)
        flag.PrintDefaults()
        fmt.Fprint(os.Stderr, `
-If SRC equals to -, then read data from stdin to temporary file.
+If SRC equals to "-", then data is read from stdin.
+If SRC is a directory, then a pax archive with its contents is created.
 
 -minsize/-chunked take NODE's freq.minsize/freq.chunked configuration
 options by default. You can forcefully turn them off by specifying 0 value.
@@ -48,6 +49,7 @@ func main() {
                cfgPath      = flag.String("cfg", nncp.DefaultCfgPath, "Path to configuration file")
                niceRaw      = flag.String("nice", nncp.NicenessFmt(nncp.DefaultNiceFile), "Outbound packet niceness")
                argMinSize   = flag.Int64("minsize", -1, "Minimal required resulting packet size, in KiB")
+               argMaxSize   = flag.Uint64("maxsize", 0, "Maximal allowable resulting packets size, in KiB")
                argChunkSize = flag.Int64("chunked", -1, "Split file on specified size chunks, in KiB")
                viaOverride  = flag.String("via", "", "Override Via path to destination node")
                spoolPath    = flag.String("spool", "", "Override path to spool")
@@ -124,6 +126,13 @@ func main() {
        nncp.ViaOverride(*viaOverride, ctx, node)
        ctx.Umask()
 
+       var chunkSize int64
+       if *argChunkSize < 0 {
+               chunkSize = node.FreqChunked
+       } else if *argChunkSize > 0 {
+               chunkSize = *argChunkSize * 1024
+       }
+
        var minSize int64
        if *argMinSize < 0 {
                minSize = node.FreqMinSize
@@ -131,14 +140,9 @@ func main() {
                minSize = *argMinSize * 1024
        }
 
-       var chunkSize int64
-       if *argChunkSize < 0 {
-               chunkSize = node.FreqChunked
-       } else if *argChunkSize > 0 {
-               chunkSize = *argChunkSize * 1024
-       }
-       if chunkSize == 0 {
-               chunkSize = nncp.MaxFileSize
+       maxSize := int64(nncp.MaxFileSize)
+       if *argMaxSize > 0 {
+               maxSize = int64(*argMaxSize) * 1024
        }
 
        if err = ctx.TxFile(
@@ -148,7 +152,7 @@ func main() {
                strings.Join(splitted, ":"),
                chunkSize,
                minSize,
-               nncp.MaxFileSize,
+               maxSize,
                areaId,
        ); err != nil {
                log.Fatalln(err)
index 9596e80787e36a7864f294fd1e609affca44662c..78b565aff66346927539af07cf6476019774d603 100644 (file)
@@ -26,7 +26,7 @@ import (
        "path/filepath"
        "strings"
 
-       "go.cypherpunks.ru/nncp/v7"
+       "go.cypherpunks.ru/nncp/v8"
 )
 
 func usage() {
index bcdbca54fe4786cbdb94b19acf1cbf1cfc527646..a350134c0486f9c46a1d3ab44526b6ea8c725aa2 100644 (file)
@@ -28,7 +28,7 @@ import (
        "os"
        "sync"
 
-       "go.cypherpunks.ru/nncp/v7"
+       "go.cypherpunks.ru/nncp/v8"
 )
 
 func usage() {
index 82becc67ae41840edb7058267b8f5045d53865b4..c6bd1205e18fe7b4ede681e8eeaf7db43af203e8 100644 (file)
@@ -25,7 +25,7 @@ import (
        "log"
        "os"
 
-       "go.cypherpunks.ru/nncp/v7"
+       "go.cypherpunks.ru/nncp/v8"
        "go.cypherpunks.ru/recfile"
 )
 
index 58a45997af2645fa20a0bf89dbe14019f59ed33d..c22b1f71038e209bda406fe3c5bf5903420c6291 100644 (file)
@@ -29,7 +29,7 @@ import (
 
        xdr "github.com/davecgh/go-xdr/xdr2"
        "github.com/klauspost/compress/zstd"
-       "go.cypherpunks.ru/nncp/v7"
+       "go.cypherpunks.ru/nncp/v8"
 )
 
 func usage() {
@@ -219,6 +219,8 @@ func main() {
                case nncp.MagicNNCPEv4.B:
                        log.Fatalln(nncp.MagicNNCPEv4.TooOld())
                case nncp.MagicNNCPEv5.B:
+                       log.Fatalln(nncp.MagicNNCPEv5.TooOld())
+               case nncp.MagicNNCPEv6.B:
                        doEncrypted(ctx, pktEnc, *dump, beginning[:nncp.PktEncOverhead])
                        return
                }
index a1ee4a3a9c97aca146f2dfd8c6b44df82ce5fdac..6a237f4ea1b3d54499e43337adb1686dd8c92b52 100644 (file)
@@ -35,7 +35,7 @@ import (
 
        xdr "github.com/davecgh/go-xdr/xdr2"
        "github.com/dustin/go-humanize"
-       "go.cypherpunks.ru/nncp/v7"
+       "go.cypherpunks.ru/nncp/v8"
 )
 
 func usage() {
index b9ca28977c7380834393c7709cc585a9e95a1238..501f5fcd2c606ea3814978fc57300cc320fddd89 100644 (file)
@@ -29,7 +29,7 @@ import (
        "strings"
        "time"
 
-       "go.cypherpunks.ru/nncp/v7"
+       "go.cypherpunks.ru/nncp/v8"
 )
 
 func usage() {
@@ -186,8 +186,11 @@ func main() {
                        continue
                }
                remove := func(xx nncp.TRxTx) error {
-                       return filepath.Walk(
-                               filepath.Join(ctx.Spool, node.Id.String(), string(xx)),
+                       p := filepath.Join(ctx.Spool, node.Id.String(), string(xx))
+                       if _, err := os.Stat(p); err != nil && os.IsNotExist(err) {
+                               return nil
+                       }
+                       return filepath.Walk(p,
                                func(path string, info os.FileInfo, err error) error {
                                        if err != nil {
                                                return err
index ea5f407dfddd8738d26f41ffa1311cfbf0b4264e..469bbed63afc78780bd6575c94686e07c4b69f82 100644 (file)
@@ -26,7 +26,7 @@ import (
        "sort"
 
        "github.com/dustin/go-humanize"
-       "go.cypherpunks.ru/nncp/v7"
+       "go.cypherpunks.ru/nncp/v8"
 )
 
 func usage() {
index 8a3b8c8cbc93d856ab8490d75b5ea0541faf92df..c4b012164220d2caa03265258441e4c845dfb425 100644 (file)
@@ -26,7 +26,7 @@ import (
        "path/filepath"
        "time"
 
-       "go.cypherpunks.ru/nncp/v7"
+       "go.cypherpunks.ru/nncp/v8"
 )
 
 func usage() {
index 6c6e3d47cb9b942186a9a40e369605694c812367..ed96f7321e275128515f5e50abb2d1af337bb8b9 100644 (file)
@@ -27,7 +27,7 @@ import (
        "path/filepath"
        "strings"
 
-       "go.cypherpunks.ru/nncp/v7"
+       "go.cypherpunks.ru/nncp/v8"
 )
 
 func usage() {
@@ -137,7 +137,15 @@ func main() {
        if err != nil {
                panic(err)
        }
-       if _, err = ctx.Tx(node, pktTrns, nice, fi.Size(), 0, fd, pktName, nil); err != nil {
+       if _, _, err = ctx.Tx(
+               node,
+               pktTrns,
+               nice,
+               fi.Size(), 0, nncp.MaxFileSize,
+               fd,
+               pktName,
+               nil,
+       ); err != nil {
                log.Fatalln(err)
        }
 }
index e565ece351e5eb72a525f2a682e5680104828629..b65686c8d14b6caa7d271d00053a13574075674d 100644 (file)
@@ -29,7 +29,7 @@ import (
        "path/filepath"
 
        "github.com/dustin/go-humanize"
-       "go.cypherpunks.ru/nncp/v7"
+       "go.cypherpunks.ru/nncp/v8"
 )
 
 func usage() {
index bce0675e164308c90d23441f81b30e8e6b3e5422..618c4132694ce17f8955cb2384d87e95d376e7d0 100644 (file)
@@ -1,4 +1,4 @@
-module go.cypherpunks.ru/nncp/v7
+module go.cypherpunks.ru/nncp/v8
 
 require (
        github.com/davecgh/go-xdr v0.0.0-20161123171359-e6a2ba005892
@@ -7,14 +7,14 @@ require (
        github.com/fsnotify/fsnotify v1.5.1
        github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75
        github.com/hjson/hjson-go v3.1.0+incompatible
-       github.com/klauspost/compress v1.13.1
+       github.com/klauspost/compress v1.13.6
        go.cypherpunks.ru/balloon v1.1.1
        go.cypherpunks.ru/recfile v0.4.3
-       golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97
+       golang.org/x/crypto v0.0.0-20210921155107-089bfa567519
        golang.org/x/net v0.0.0-20210614182718-04defd469f4e
-       golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf
-       golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b
-       lukechampine.com/blake3 v1.1.5
+       golang.org/x/sys v0.0.0-20211107104306-e0b2ad06fe42
+       golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
+       lukechampine.com/blake3 v1.1.6
 )
 
 go 1.13
index 05a086e787bc90dd76f5d86ae8534bd6d89b69c2..acfe21526a3ed56926a8219bd281a443d6b26253 100644 (file)
@@ -6,16 +6,14 @@ github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ=
 github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
 github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
 github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
-github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
-github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75 h1:f0n1xnMSmBLzVfsMMvriDyA75NB/oBgILX2GcHXIQzY=
 github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA=
 github.com/hjson/hjson-go v3.1.0+incompatible h1:DY/9yE8ey8Zv22bY+mHV1uk2yRy0h8tKhZ77hEdi0Aw=
 github.com/hjson/hjson-go v3.1.0+incompatible/go.mod h1:qsetwF8NlsTsOTwZTApNlTCerV+b2GjYRRcIk4JMFio=
-github.com/klauspost/compress v1.13.1 h1:wXr2uRxZTJXHLly6qhJabee5JqIhTRoLBhDOA74hDEQ=
-github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
-github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s=
-github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4=
+github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc=
+github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4=
+github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
 github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
 github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@@ -26,8 +24,8 @@ go.cypherpunks.ru/balloon v1.1.1/go.mod h1:k4s4ozrIrhpBjj78Z7LX8ZHxMQ+XE7DZUWl8g
 go.cypherpunks.ru/recfile v0.4.3 h1:ephokihmV//p0ob6gx2FWXvm28/NBDbWTOJPUNahxO8=
 go.cypherpunks.ru/recfile v0.4.3/go.mod h1:sR+KajB+vzofL3SFVFwKt3Fke0FaCcN1g3YPNAhU3qI=
 golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
-golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 h1:/UOmuWzQfxxo9UtlXMwuQU8CMgg1eZXqTRwkSQJWKOI=
-golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q=
 golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
@@ -35,15 +33,15 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf h1:2ucpDCmfkl8Bd/FsLtiD653Wf96cW37s+iGx93zsu4k=
-golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211107104306-e0b2ad06fe42 h1:G2DDmludOQZoWbpCr7OKDxnl478ZBGMcOhrv+ooX/Q4=
+golang.org/x/sys v0.0.0-20211107104306-e0b2ad06fe42/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE=
-golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-lukechampine.com/blake3 v1.1.5 h1:hsACfxWvLdGmjYbWGrumQIphOvO+ZruZehWtgd2fxoM=
-lukechampine.com/blake3 v1.1.5/go.mod h1:hE8RpzdO8ttZ7446CXEwDP1eu2V4z7stv0Urj1El20g=
+lukechampine.com/blake3 v1.1.6 h1:H3cROdztr7RCfoaTpGZFQsrqvweFLrqS73j7L7cmR5c=
+lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA=
index 7e920fd5c60182609008e3b2457ab32324bba17c..ae18b74cfdaf3fad39883d85a9b938825dc1f8d0 100644 (file)
@@ -181,6 +181,8 @@ func (ctx *Ctx) jobsFind(nodeId *NodeId, xx TRxTx, nock, part bool) chan Job {
                        case MagicNNCPEv4.B:
                                err = MagicNNCPEv4.TooOld()
                        case MagicNNCPEv5.B:
+                               err = MagicNNCPEv5.TooOld()
+                       case MagicNNCPEv6.B:
                        default:
                                err = BadMagic
                        }
index a2e72b238f61e1a570e194d91edca8c58263c80b..e4b860f9e5061a7c7accd23b7341e66050c611b7 100644 (file)
@@ -42,32 +42,45 @@ type LE struct {
 type LEs []LE
 
 func (les LEs) Rec() string {
-       fields := make([]recfile.Field, 0, len(les)+1)
-       fields = append(fields, recfile.Field{
-               Name: "When", Value: time.Now().UTC().Format(time.RFC3339Nano),
-       })
-       var val string
-       for _, le := range les {
-               switch v := le.V.(type) {
-               case int, int8, uint8, int64, uint64:
-                       val = fmt.Sprintf("%d", v)
-               case bool:
-                       val = fmt.Sprintf("%v", v)
-               default:
-                       val = fmt.Sprintf("%s", v)
-               }
-               fields = append(fields, recfile.Field{Name: le.K, Value: val})
-       }
        b := bytes.NewBuffer(make([]byte, 0, 1<<10))
        w := recfile.NewWriter(b)
        _, err := w.RecordStart()
        if err != nil {
                panic(err)
        }
-       _, err = w.WriteFields(fields...)
+       _, err = w.WriteFields(recfile.Field{
+               Name:  "When",
+               Value: time.Now().UTC().Format(time.RFC3339Nano),
+       })
        if err != nil {
                panic(err)
        }
+       for _, le := range les {
+               switch v := le.V.(type) {
+               case int, int8, uint8, int64, uint64:
+                       _, err = w.WriteFields(recfile.Field{
+                               Name:  le.K,
+                               Value: fmt.Sprintf("%d", v),
+                       })
+               case bool:
+                       _, err = w.WriteFields(recfile.Field{
+                               Name:  le.K,
+                               Value: fmt.Sprintf("%v", v),
+                       })
+               case []string:
+                       if len(v) > 0 {
+                               _, err = w.WriteFieldMultiline(le.K, v)
+                       }
+               default:
+                       _, err = w.WriteFields(recfile.Field{
+                               Name:  le.K,
+                               Value: fmt.Sprintf("%s", v),
+                       })
+               }
+               if err != nil {
+                       panic(err)
+               }
+       }
        return b.String()
 }
 
@@ -121,6 +134,9 @@ func (ctx *Ctx) LogI(who string, les LEs, msg func(LEs) string) {
        les = append(LEs{{"Who", who}}, les...)
        les = append(les, LE{"Msg", msg(les)})
        rec := les.Rec()
+       if ctx.Debug {
+               fmt.Fprint(os.Stderr, rec)
+       }
        if !ctx.Quiet {
                fmt.Fprintln(os.Stderr, ctx.HumanizeRec(rec))
        }
@@ -131,6 +147,9 @@ func (ctx *Ctx) LogE(who string, les LEs, err error, msg func(LEs) string) {
        les = append(LEs{{"Err", err.Error()}, {"Who", who}}, les...)
        les = append(les, LE{"Msg", msg(les)})
        rec := les.Rec()
+       if ctx.Debug {
+               fmt.Fprint(os.Stderr, rec)
+       }
        if !ctx.Quiet {
                fmt.Fprintln(os.Stderr, ctx.HumanizeRec(rec))
        }
index 938d33347712f9f3f6792c8761fd71cc24508bcd..f0fdb58410316531b848b75ed6027c5543cf2567 100644 (file)
@@ -67,7 +67,11 @@ var (
        }
        MagicNNCPEv5 = Magic{
                B:    [8]byte{'N', 'N', 'C', 'P', 'E', 0, 0, 5},
-               Name: "NNCPEv5 (encrypted packet v5)", Till: "now",
+               Name: "NNCPEv5 (encrypted packet v5)", Till: "7.7.0",
+       }
+       MagicNNCPEv6 = Magic{
+               B:    [8]byte{'N', 'N', 'C', 'P', 'E', 0, 0, 6},
+               Name: "NNCPEv6 (encrypted packet v6)", Till: "now",
        }
        MagicNNCPSv1 = Magic{
                B:    [8]byte{'N', 'N', 'C', 'P', 'S', 0, 0, 1},
index 7682a791e3ca83bccc65d3bd58803e2b06fb98d0..90713b04eda37bd0856c8595f97cdb783ad8cddd 100644 (file)
@@ -64,12 +64,11 @@ func NicenessParse(s string) (uint8, error) {
                        return 0, errors.New("too big niceness delta")
                }
                return baseNice - uint8(delta), nil
-       } else {
-               if delta > 32 || (baseNice == NiceBulk && delta > 31) {
-                       return 0, errors.New("too big niceness delta")
-               }
-               return baseNice + uint8(delta), nil
        }
+       if delta > 32 || (baseNice == NiceBulk && delta > 31) {
+               return 0, errors.New("too big niceness delta")
+       }
+       return baseNice + uint8(delta), nil
 }
 
 func NicenessFmt(nice uint8) string {
index e6ba59771f616d82ff3d04683013f60db7984b79..3bf6d1a5c6548222aa2f28453fadddeab2cea5c2 100644 (file)
@@ -40,7 +40,7 @@ along with this program.  If not, see <http://www.gnu.org/licenses/>.`
 const Base32Encoded32Len = 52
 
 var (
-       Version string = "7.7.0"
+       Version string = "8.0.0"
 
        Base32Codec *base32.Encoding = base32.StdEncoding.WithPadding(base32.NoPadding)
 )
index aa3025d988b40cc73e41436a84deb36a7a3337c3..a1993e7a8ed98334e2c96dc14e22cc28d73235a1 100644 (file)
@@ -21,7 +21,6 @@ import (
        "bytes"
        "crypto/cipher"
        "crypto/rand"
-       "encoding/binary"
        "errors"
        "io"
 
@@ -49,15 +48,20 @@ const (
        MaxPathSize = 1<<8 - 1
 
        NNCPBundlePrefix = "NNCP"
-
-       PktSizeOverhead = 8 + poly1305.TagSize
 )
 
 var (
        BadPktType error = errors.New("Unknown packet type")
 
-       PktOverhead    int64
-       PktEncOverhead int64
+       DeriveKeyFullCtx = string(MagicNNCPEv6.B[:]) + " FULL"
+       DeriveKeySizeCtx = string(MagicNNCPEv6.B[:]) + " SIZE"
+       DeriveKeyPadCtx  = string(MagicNNCPEv6.B[:]) + " PAD"
+
+       PktOverhead     int64
+       PktEncOverhead  int64
+       PktSizeOverhead int64
+
+       TooBig = errors.New("Too big than allowed")
 )
 
 type Pkt struct {
@@ -85,9 +89,28 @@ type PktEnc struct {
        Sign      [ed25519.SignatureSize]byte
 }
 
+type PktSize struct {
+       Payload uint64
+       Pad     uint64
+}
+
+func NewPkt(typ PktType, nice uint8, path []byte) (*Pkt, error) {
+       if len(path) > MaxPathSize {
+               return nil, errors.New("Too long path")
+       }
+       pkt := Pkt{
+               Magic:   MagicNNCPPv3.B,
+               Type:    typ,
+               Nice:    nice,
+               PathLen: uint8(len(path)),
+       }
+       copy(pkt.Path[:], path)
+       return &pkt, nil
+}
+
 func init() {
-       pkt := Pkt{Type: PktTypeFile}
        var buf bytes.Buffer
+       pkt := Pkt{Type: PktTypeFile}
        n, err := xdr.Marshal(&buf, pkt)
        if err != nil {
                panic(err)
@@ -100,7 +123,7 @@ func init() {
                panic(err)
        }
        pktEnc := PktEnc{
-               Magic:     MagicNNCPEv5.B,
+               Magic:     MagicNNCPEv6.B,
                Sender:    dummyId,
                Recipient: dummyId,
        }
@@ -109,20 +132,14 @@ func init() {
                panic(err)
        }
        PktEncOverhead = int64(n)
-}
+       buf.Reset()
 
-func NewPkt(typ PktType, nice uint8, path []byte) (*Pkt, error) {
-       if len(path) > MaxPathSize {
-               return nil, errors.New("Too long path")
-       }
-       pkt := Pkt{
-               Magic:   MagicNNCPPv3.B,
-               Type:    typ,
-               Nice:    nice,
-               PathLen: uint8(len(path)),
+       size := PktSize{}
+       n, err = xdr.Marshal(&buf, size)
+       if err != nil {
+               panic(err)
        }
-       copy(pkt.Path[:], path)
-       return &pkt, nil
+       PktSizeOverhead = int64(n)
 }
 
 func ctrIncr(b []byte) {
@@ -135,53 +152,28 @@ func ctrIncr(b []byte) {
        panic("counter overflow")
 }
 
-func aeadProcess(
-       aead cipher.AEAD,
-       nonce, ad []byte,
-       doEncrypt bool,
-       r io.Reader,
-       w io.Writer,
-) (int64, error) {
-       ciphCtr := nonce[len(nonce)-8:]
-       buf := make([]byte, EncBlkSize+aead.Overhead())
-       var toRead []byte
-       var toWrite []byte
-       var n int
-       var readBytes int64
-       var err error
-       if doEncrypt {
-               toRead = buf[:EncBlkSize]
-       } else {
-               toRead = buf
+func TbsPrepare(our *NodeOur, their *Node, pktEnc *PktEnc) []byte {
+       tbs := PktTbs{
+               Magic:     MagicNNCPEv6.B,
+               Nice:      pktEnc.Nice,
+               Sender:    their.Id,
+               Recipient: our.Id,
+               ExchPub:   pktEnc.ExchPub,
        }
-       for {
-               n, err = io.ReadFull(r, toRead)
-               if err != nil {
-                       if err == io.EOF {
-                               break
-                       }
-                       if err != io.ErrUnexpectedEOF {
-                               return readBytes + int64(n), err
-                       }
-               }
-               readBytes += int64(n)
-               ctrIncr(ciphCtr)
-               if doEncrypt {
-                       toWrite = aead.Seal(buf[:0], nonce, buf[:n], ad)
-               } else {
-                       toWrite, err = aead.Open(buf[:0], nonce, buf[:n], ad)
-                       if err != nil {
-                               return readBytes, err
-                       }
-               }
-               if _, err = w.Write(toWrite); err != nil {
-                       return readBytes, err
-               }
+       var tbsBuf bytes.Buffer
+       if _, err := xdr.Marshal(&tbsBuf, &tbs); err != nil {
+               panic(err)
        }
-       return readBytes, nil
+       return tbsBuf.Bytes()
+}
+
+func TbsVerify(our *NodeOur, their *Node, pktEnc *PktEnc) ([]byte, bool, error) {
+       tbs := TbsPrepare(our, their, pktEnc)
+       return tbs, ed25519.Verify(their.SignPub, tbs, pktEnc.Sign[:]), nil
 }
 
 func sizeWithTags(size int64) (fullSize int64) {
+       size += PktSizeOverhead
        fullSize = size + (size/EncBlkSize)*poly1305.TagSize
        if size%EncBlkSize != 0 {
                fullSize += poly1305.TagSize
@@ -189,122 +181,182 @@ func sizeWithTags(size int64) (fullSize int64) {
        return
 }
 
+func sizePadCalc(sizePayload, minSize int64, wrappers int) (sizePad int64) {
+       expectedSize := sizePayload - PktOverhead
+       for i := 0; i < wrappers; i++ {
+               expectedSize = PktEncOverhead + sizeWithTags(PktOverhead+expectedSize)
+       }
+       sizePad = minSize - expectedSize
+       if sizePad < 0 {
+               sizePad = 0
+       }
+       return
+}
+
 func PktEncWrite(
-       our *NodeOur,
-       their *Node,
-       pkt *Pkt,
-       nice uint8,
-       size, padSize int64,
-       data io.Reader,
-       out io.Writer,
-) ([]byte, error) {
-       pubEph, prvEph, err := box.GenerateKey(rand.Reader)
+       our *NodeOur, their *Node,
+       pkt *Pkt, nice uint8,
+       minSize, maxSize int64, wrappers int,
+       r io.Reader, w io.Writer,
+) (pktEncRaw []byte, size int64, err error) {
+       pub, prv, err := box.GenerateKey(rand.Reader)
        if err != nil {
-               return nil, err
+               return nil, 0, err
        }
-       var pktBuf bytes.Buffer
-       if _, err := xdr.Marshal(&pktBuf, pkt); err != nil {
-               return nil, err
+
+       var buf bytes.Buffer
+       _, err = xdr.Marshal(&buf, pkt)
+       if err != nil {
+               return
        }
+       pktRaw := make([]byte, buf.Len())
+       copy(pktRaw, buf.Bytes())
+       buf.Reset()
+
        tbs := PktTbs{
-               Magic:     MagicNNCPEv5.B,
+               Magic:     MagicNNCPEv6.B,
                Nice:      nice,
                Sender:    our.Id,
                Recipient: their.Id,
-               ExchPub:   *pubEph,
+               ExchPub:   *pub,
        }
-       var tbsBuf bytes.Buffer
-       if _, err = xdr.Marshal(&tbsBuf, &tbs); err != nil {
-               return nil, err
+       _, err = xdr.Marshal(&buf, &tbs)
+       if err != nil {
+               return
        }
        signature := new([ed25519.SignatureSize]byte)
-       copy(signature[:], ed25519.Sign(our.SignPrv, tbsBuf.Bytes()))
+       copy(signature[:], ed25519.Sign(our.SignPrv, buf.Bytes()))
+       ad := blake3.Sum256(buf.Bytes())
+       buf.Reset()
+
        pktEnc := PktEnc{
-               Magic:     MagicNNCPEv5.B,
+               Magic:     MagicNNCPEv6.B,
                Nice:      nice,
                Sender:    our.Id,
                Recipient: their.Id,
-               ExchPub:   *pubEph,
+               ExchPub:   *pub,
                Sign:      *signature,
        }
-       ad := blake3.Sum256(tbsBuf.Bytes())
-       tbsBuf.Reset()
-       if _, err = xdr.Marshal(&tbsBuf, &pktEnc); err != nil {
-               return nil, err
+       _, err = xdr.Marshal(&buf, &pktEnc)
+       if err != nil {
+               return
        }
-       pktEncRaw := tbsBuf.Bytes()
-       if _, err = out.Write(pktEncRaw); err != nil {
-               return nil, err
+       pktEncRaw = make([]byte, buf.Len())
+       copy(pktEncRaw, buf.Bytes())
+       buf.Reset()
+       _, err = w.Write(pktEncRaw)
+       if err != nil {
+               return
        }
-       sharedKey := new([32]byte)
-       curve25519.ScalarMult(sharedKey, prvEph, their.ExchPub)
 
-       key := make([]byte, chacha20poly1305.KeySize)
-       blake3.DeriveKey(key, string(MagicNNCPEv5.B[:]), sharedKey[:])
-       aead, err := chacha20poly1305.New(key)
+       sharedKey := new([32]byte)
+       curve25519.ScalarMult(sharedKey, prv, their.ExchPub)
+       keyFull := make([]byte, chacha20poly1305.KeySize)
+       keySize := make([]byte, chacha20poly1305.KeySize)
+       blake3.DeriveKey(keyFull, DeriveKeyFullCtx, sharedKey[:])
+       blake3.DeriveKey(keySize, DeriveKeySizeCtx, sharedKey[:])
+       aeadFull, err := chacha20poly1305.New(keyFull)
+       if err != nil {
+               return
+       }
+       aeadSize, err := chacha20poly1305.New(keySize)
        if err != nil {
-               return nil, err
+               return
        }
-       nonce := make([]byte, aead.NonceSize())
+       nonce := make([]byte, aeadFull.NonceSize())
 
-       fullSize := int64(pktBuf.Len()) + size
-       sizeBuf := make([]byte, 8+aead.Overhead())
-       binary.BigEndian.PutUint64(sizeBuf, uint64(sizeWithTags(fullSize)))
-       if _, err = out.Write(aead.Seal(sizeBuf[:0], nonce, sizeBuf[:8], ad[:])); err != nil {
-               return nil, err
+       data := make([]byte, EncBlkSize, EncBlkSize+aeadFull.Overhead())
+       mr := io.MultiReader(bytes.NewReader(pktRaw), r)
+       var sizePayload int64
+       var n int
+       var ct []byte
+       for {
+               n, err = io.ReadFull(mr, data)
+               sizePayload += int64(n)
+               if sizePayload > maxSize {
+                       err = TooBig
+                       return
+               }
+               if err == nil {
+                       ct = aeadFull.Seal(data[:0], nonce, data[:n], ad[:])
+                       _, err = w.Write(ct)
+                       if err != nil {
+                               return
+                       }
+                       ctrIncr(nonce)
+                       continue
+               }
+               if !(err == io.EOF || err == io.ErrUnexpectedEOF) {
+                       return
+               }
+               break
        }
 
-       lr := io.LimitedReader{R: data, N: size}
-       mr := io.MultiReader(&pktBuf, &lr)
-       written, err := aeadProcess(aead, nonce, ad[:], true, mr, out)
+       sizePad := sizePadCalc(sizePayload, minSize, wrappers)
+       _, err = xdr.Marshal(&buf, &PktSize{uint64(sizePayload), uint64(sizePad)})
        if err != nil {
-               return nil, err
-       }
-       if written != fullSize {
-               return nil, io.ErrUnexpectedEOF
+               return
        }
-       if padSize > 0 {
-               blake3.DeriveKey(key, string(MagicNNCPEv5.B[:])+" PAD", sharedKey[:])
-               xof := blake3.New(32, key).XOF()
-               if _, err = io.CopyN(out, xof, padSize); err != nil {
-                       return nil, err
+
+       var aeadLast cipher.AEAD
+       if n+int(PktSizeOverhead) > EncBlkSize {
+               left := make([]byte, (n+int(PktSizeOverhead))-EncBlkSize)
+               copy(left, data[n-len(left):])
+               copy(data[PktSizeOverhead:], data[:n-len(left)])
+               copy(data[:PktSizeOverhead], buf.Bytes())
+               ct = aeadSize.Seal(data[:0], nonce, data[:EncBlkSize], ad[:])
+               _, err = w.Write(ct)
+               if err != nil {
+                       return
                }
+               ctrIncr(nonce)
+               copy(data, left)
+               n = len(left)
+               aeadLast = aeadFull
+       } else {
+               copy(data[PktSizeOverhead:], data[:n])
+               copy(data[:PktSizeOverhead], buf.Bytes())
+               n += int(PktSizeOverhead)
+               aeadLast = aeadSize
        }
-       return pktEncRaw, nil
-}
 
-func TbsPrepare(our *NodeOur, their *Node, pktEnc *PktEnc) []byte {
-       tbs := PktTbs{
-               Magic:     MagicNNCPEv5.B,
-               Nice:      pktEnc.Nice,
-               Sender:    their.Id,
-               Recipient: our.Id,
-               ExchPub:   pktEnc.ExchPub,
+       var sizeBlockPadded int
+       var sizePadLeft int64
+       if sizePad > EncBlkSize-int64(n) {
+               sizeBlockPadded = EncBlkSize
+               sizePadLeft = sizePad - (EncBlkSize - int64(n))
+       } else {
+               sizeBlockPadded = n + int(sizePad)
+               sizePadLeft = 0
        }
-       var tbsBuf bytes.Buffer
-       if _, err := xdr.Marshal(&tbsBuf, &tbs); err != nil {
-               panic(err)
+       for i := n; i < sizeBlockPadded; i++ {
+               data[i] = 0
+       }
+       ct = aeadLast.Seal(data[:0], nonce, data[:sizeBlockPadded], ad[:])
+       _, err = w.Write(ct)
+       if err != nil {
+               return
        }
-       return tbsBuf.Bytes()
-}
 
-func TbsVerify(our *NodeOur, their *Node, pktEnc *PktEnc) ([]byte, bool, error) {
-       tbs := TbsPrepare(our, their, pktEnc)
-       return tbs, ed25519.Verify(their.SignPub, tbs, pktEnc.Sign[:]), nil
+       size = sizePayload
+       if sizePadLeft > 0 {
+               keyPad := make([]byte, chacha20poly1305.KeySize)
+               blake3.DeriveKey(keyPad, DeriveKeyPadCtx, sharedKey[:])
+               _, err = io.CopyN(w, blake3.New(32, keyPad).XOF(), sizePadLeft)
+       }
+       return
 }
 
 func PktEncRead(
-       our *NodeOur,
-       nodes map[NodeId]*Node,
-       data io.Reader,
-       out io.Writer,
+       our *NodeOur, nodes map[NodeId]*Node,
+       r io.Reader, w io.Writer,
        signatureVerify bool,
        sharedKeyCached []byte,
-) ([]byte, *Node, int64, error) {
+) (sharedKey []byte, their *Node, size int64, err error) {
        var pktEnc PktEnc
-       _, err := xdr.Unmarshal(data, &pktEnc)
+       _, err = xdr.Unmarshal(r, &pktEnc)
        if err != nil {
-               return nil, nil, 0, err
+               return
        }
        switch pktEnc.Magic {
        case MagicNNCPEv1.B:
@@ -316,66 +368,159 @@ func PktEncRead(
        case MagicNNCPEv4.B:
                err = MagicNNCPEv4.TooOld()
        case MagicNNCPEv5.B:
+               err = MagicNNCPEv5.TooOld()
+       case MagicNNCPEv6.B:
        default:
                err = BadMagic
        }
        if err != nil {
-               return nil, nil, 0, err
+               return
        }
        if *pktEnc.Recipient != *our.Id {
-               return nil, nil, 0, errors.New("Invalid recipient")
+               err = errors.New("Invalid recipient")
+               return
        }
+
        var tbsRaw []byte
-       var their *Node
        if signatureVerify {
                their = nodes[*pktEnc.Sender]
                if their == nil {
-                       return nil, nil, 0, errors.New("Unknown sender")
+                       err = errors.New("Unknown sender")
+                       return
                }
                var verified bool
                tbsRaw, verified, err = TbsVerify(our, their, &pktEnc)
                if err != nil {
-                       return nil, nil, 0, err
+                       return
                }
                if !verified {
-                       return nil, their, 0, errors.New("Invalid signature")
+                       err = errors.New("Invalid signature")
+                       return
                }
        } else {
                tbsRaw = TbsPrepare(our, &Node{Id: pktEnc.Sender}, &pktEnc)
        }
        ad := blake3.Sum256(tbsRaw)
-       sharedKey := new([32]byte)
        if sharedKeyCached == nil {
-               curve25519.ScalarMult(sharedKey, our.ExchPrv, &pktEnc.ExchPub)
+               key := new([32]byte)
+               curve25519.ScalarMult(key, our.ExchPrv, &pktEnc.ExchPub)
+               sharedKey = key[:]
        } else {
-               copy(sharedKey[:], sharedKeyCached)
+               sharedKey = sharedKeyCached
        }
 
-       key := make([]byte, chacha20poly1305.KeySize)
-       blake3.DeriveKey(key, string(MagicNNCPEv5.B[:]), sharedKey[:])
-       aead, err := chacha20poly1305.New(key)
+       keyFull := make([]byte, chacha20poly1305.KeySize)
+       keySize := make([]byte, chacha20poly1305.KeySize)
+       blake3.DeriveKey(keyFull, DeriveKeyFullCtx, sharedKey[:])
+       blake3.DeriveKey(keySize, DeriveKeySizeCtx, sharedKey[:])
+       aeadFull, err := chacha20poly1305.New(keyFull)
        if err != nil {
-               return sharedKey[:], their, 0, err
+               return
+       }
+       aeadSize, err := chacha20poly1305.New(keySize)
+       if err != nil {
+               return
+       }
+       nonce := make([]byte, aeadFull.NonceSize())
+
+       ct := make([]byte, EncBlkSize+aeadFull.Overhead())
+       pt := make([]byte, EncBlkSize)
+       var n int
+FullRead:
+       for {
+               n, err = io.ReadFull(r, ct)
+               switch err {
+               case nil:
+                       pt, err = aeadFull.Open(pt[:0], nonce, ct, ad[:])
+                       if err != nil {
+                               break FullRead
+                       }
+                       size += EncBlkSize
+                       _, err = w.Write(pt)
+                       if err != nil {
+                               return
+                       }
+                       ctrIncr(nonce)
+                       continue
+               case io.ErrUnexpectedEOF:
+                       break FullRead
+               default:
+                       return
+               }
        }
-       nonce := make([]byte, aead.NonceSize())
 
-       sizeBuf := make([]byte, 8+aead.Overhead())
-       if _, err = io.ReadFull(data, sizeBuf); err != nil {
-               return sharedKey[:], their, 0, err
+       pt, err = aeadSize.Open(pt[:0], nonce, ct[:n], ad[:])
+       if err != nil {
+               return
        }
-       sizeBuf, err = aead.Open(sizeBuf[:0], nonce, sizeBuf, ad[:])
+       var pktSize PktSize
+       _, err = xdr.Unmarshal(bytes.NewReader(pt), &pktSize)
        if err != nil {
-               return sharedKey[:], their, 0, err
+               return
        }
-       size := int64(binary.BigEndian.Uint64(sizeBuf))
+       pt = pt[PktSizeOverhead:]
 
-       lr := io.LimitedReader{R: data, N: size}
-       written, err := aeadProcess(aead, nonce, ad[:], false, &lr, out)
+       left := int64(pktSize.Payload) - size
+       for left > int64(len(pt)) {
+               size += int64(len(pt))
+               left -= int64(len(pt))
+               _, err = w.Write(pt)
+               if err != nil {
+                       return
+               }
+               n, err = io.ReadFull(r, ct)
+               if err != nil && err != io.ErrUnexpectedEOF {
+                       return
+               }
+               ctrIncr(nonce)
+               pt, err = aeadFull.Open(pt[:0], nonce, ct[:n], ad[:])
+               if err != nil {
+                       return
+               }
+       }
+       size += left
+       _, err = w.Write(pt[:left])
        if err != nil {
-               return sharedKey[:], their, written, err
+               return
+       }
+       pt = pt[left:]
+
+       if pktSize.Pad < uint64(len(pt)) {
+               err = errors.New("unexpected pad")
+               return
        }
-       if written != size {
-               return sharedKey[:], their, written, io.ErrUnexpectedEOF
+       for i := 0; i < len(pt); i++ {
+               if pt[i] != 0 {
+                       err = errors.New("non-zero pad byte")
+                       return
+               }
        }
-       return sharedKey[:], their, size, nil
+       sizePad := int64(pktSize.Pad) - int64(len(pt))
+       if sizePad == 0 {
+               return
+       }
+
+       keyPad := make([]byte, chacha20poly1305.KeySize)
+       blake3.DeriveKey(keyPad, DeriveKeyPadCtx, sharedKey[:])
+       xof := blake3.New(32, keyPad).XOF()
+       pt = make([]byte, len(ct))
+       for sizePad > 0 {
+               n, err = io.ReadFull(r, ct)
+               if err != nil && err != io.ErrUnexpectedEOF {
+                       return
+               }
+               _, err = io.ReadFull(xof, pt[:n])
+               if err != nil {
+                       panic(err)
+               }
+               if bytes.Compare(ct[:n], pt[:n]) != 0 {
+                       err = errors.New("wrong pad value")
+                       return
+               }
+               sizePad -= int64(n)
+       }
+       if sizePad < 0 {
+               err = errors.New("excess pad")
+       }
+       return
 }
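
The hunks above are the core of the v8.0.0 format change: encrypted packet v6 no longer prepends a separately sealed 8-byte size, but streams the payload in EncBlkSize blocks under a "FULL" key, seals the block carrying the XDR-encoded PktSize record (payload and pad lengths) under a distinct "SIZE" key, zero-fills the rest of that block, and emits any remaining padding as BLAKE3 XOF output keyed with a third "PAD" key so the receiver can verify it. All three ChaCha20-Poly1305 keys are derived from the X25519 shared secret with blake3.DeriveKey and the MagicNNCPEv6-based contexts. Below is a standalone sketch of the accompanying size arithmetic; the PktOverhead/PktEncOverhead values are assumptions here, since NNCP computes them in init() by XDR-marshalling the structures.

    // sizes.go -- standalone sketch of the size bookkeeping from the hunks above.
    package main

    import "fmt"

    const (
    	EncBlkSize      = 128 * 1024 // assumed 128 KiB plaintext block
    	TagSize         = 16         // Poly1305 tag appended to every sealed block
    	PktOverhead     = 65         // assumption: XDR size of the plain Pkt header
    	PktEncOverhead  = 123        // assumption: XDR size of the PktEnc header
    	PktSizeOverhead = 16         // XDR size of PktSize (Payload + Pad, 8 bytes each)
    )

    // sizeWithTags mirrors the diff: the PktSize record now travels inside the
    // encrypted stream, and every (possibly partial) block carries one tag.
    func sizeWithTags(size int64) (fullSize int64) {
    	size += PktSizeOverhead
    	fullSize = size + (size/EncBlkSize)*TagSize
    	if size%EncBlkSize != 0 {
    		fullSize += TagSize
    	}
    	return
    }

    // sizePadCalc mirrors the diff: choose a pad so that, after the given number
    // of encryption wrappers, the outer packet is at least minSize bytes long.
    func sizePadCalc(sizePayload, minSize int64, wrappers int) (sizePad int64) {
    	expectedSize := sizePayload - PktOverhead
    	for i := 0; i < wrappers; i++ {
    		expectedSize = PktEncOverhead + sizeWithTags(PktOverhead+expectedSize)
    	}
    	sizePad = minSize - expectedSize
    	if sizePad < 0 {
    		sizePad = 0
    	}
    	return
    }

    func main() {
    	payload := int64(PktOverhead) + 1000 // plain header plus 1000 bytes of data
    	fmt.Println("one wrapper, no minimum:", PktEncOverhead+sizeWithTags(payload))
    	fmt.Println("pad to reach 4096:", sizePadCalc(payload, 4096, 1))
    }
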
index 62efa71fdfe60e0f86903b2ebe91a3af7710eb7f..4f5919080cf0c296950c61b62291f4e2d57636a9 100644 (file)
@@ -19,6 +19,8 @@ package nncp
 
 import (
        "bytes"
+       "crypto/rand"
+       "io"
        "testing"
        "testing/quick"
 
@@ -34,24 +36,37 @@ func TestPktEncWrite(t *testing.T) {
        if err != nil {
                panic(err)
        }
-       f := func(path string, pathSize uint8, data [1 << 16]byte, size, padSize uint16) bool {
-               dataR := bytes.NewReader(data[:])
+       f := func(
+               path string,
+               pathSize uint8,
+               dataSize uint32,
+               size, minSize uint16,
+               wrappers uint8,
+       ) bool {
+               dataSize %= 1 << 20
+               data := make([]byte, dataSize)
+               if _, err = io.ReadFull(rand.Reader, data); err != nil {
+                       panic(err)
+               }
                var ct bytes.Buffer
                if len(path) > int(pathSize) {
                        path = path[:int(pathSize)]
                }
-               pkt, err := NewPkt(PktTypeFile, 123, []byte(path))
+               nice := uint8(123)
+               pkt, err := NewPkt(PktTypeFile, nice, []byte(path))
                if err != nil {
                        panic(err)
                }
-               _, err = PktEncWrite(
+               wrappers %= 8
+               _, _, err = PktEncWrite(
                        nodeOur,
                        nodeTheir.Their(),
                        pkt,
-                       123,
-                       int64(size),
-                       int64(padSize),
-                       dataR,
+                       nice,
+                       int64(minSize),
+                       MaxFileSize,
+                       int(wrappers),
+                       bytes.NewReader(data),
                        &ct,
                )
                if err != nil {
@@ -83,32 +98,39 @@ func TestPktEncRead(t *testing.T) {
        f := func(
                path string,
                pathSize uint8,
-               data [1 << 16]byte,
-               size, padSize uint16,
-               junk []byte) bool {
-               dataR := bytes.NewReader(data[:])
+               dataSize uint32,
+               minSize uint16,
+               wrappers uint8,
+       ) bool {
+               dataSize %= 1 << 20
+               data := make([]byte, dataSize)
+               if _, err = io.ReadFull(rand.Reader, data); err != nil {
+                       panic(err)
+               }
                var ct bytes.Buffer
                if len(path) > int(pathSize) {
                        path = path[:int(pathSize)]
                }
-               pkt, err := NewPkt(PktTypeFile, 123, []byte(path))
+               nice := uint8(123)
+               pkt, err := NewPkt(PktTypeFile, nice, []byte(path))
                if err != nil {
                        panic(err)
                }
-               _, err = PktEncWrite(
+               wrappers %= 8
+               _, _, err = PktEncWrite(
                        node1,
                        node2.Their(),
                        pkt,
-                       123,
-                       int64(size),
-                       int64(padSize),
-                       dataR,
+                       nice,
+                       int64(minSize),
+                       MaxFileSize,
+                       int(wrappers),
+                       bytes.NewReader(data),
                        &ct,
                )
                if err != nil {
                        return false
                }
-               ct.Write(junk)
                var pt bytes.Buffer
                nodes := make(map[NodeId]*Node)
                nodes[*node1.Id] = node1.Their()
@@ -119,12 +141,12 @@ func TestPktEncRead(t *testing.T) {
                if *node.Id != *node1.Id {
                        return false
                }
-               if sizeGot != sizeWithTags(PktOverhead+int64(size)) {
+               if sizeGot != int64(len(data)+int(PktOverhead)) {
                        return false
                }
                var pktBuf bytes.Buffer
                xdr.Marshal(&pktBuf, &pkt)
-               return bytes.Compare(pt.Bytes(), append(pktBuf.Bytes(), data[:int(size)]...)) == 0
+               return bytes.Compare(pt.Bytes(), append(pktBuf.Bytes(), data...)) == 0
        }
        if err := quick.Check(f, nil); err != nil {
                t.Error(err)
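
The rewritten tests stay property-based via testing/quick: path, data size, minimum size and wrapper count are random, trimmed into sane bounds with modulo (payloads under 1 MiB, at most 7 wrappers). Below is a self-contained example of the same pattern with an illustrative, unrelated property; the names are not NNCP's.

    package example

    import (
    	"bytes"
    	"testing"
    	"testing/quick"
    )

    // chunk splits data into pieces of at most chunkSize bytes.
    func chunk(data []byte, chunkSize int) [][]byte {
    	var out [][]byte
    	for len(data) > chunkSize {
    		out = append(out, data[:chunkSize])
    		data = data[chunkSize:]
    	}
    	return append(out, data)
    }

    // TestChunkRoundTrip shows the testing/quick pattern used above: the property
    // function receives randomly generated arguments, trims them into a sane
    // range with modulo, and returns false if an invariant is violated.
    func TestChunkRoundTrip(t *testing.T) {
    	f := func(data []byte, chunkSize uint16) bool {
    		size := int(chunkSize)%(1<<10) + 1 // keep chunks in 1..1024 bytes
    		var joined []byte
    		for _, c := range chunk(data, size) {
    			if len(c) > size {
    				return false
    			}
    			joined = append(joined, c...)
    		}
    		return bytes.Equal(joined, data)
    	}
    	if err := quick.Check(f, nil); err != nil {
    		t.Error(err)
    	}
    }
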
index 5ebc72bdb20b2e16df5ba53ed33bf0669f5a13b5..b11a3ac3f0b1bbdc1c4705250bf9db3889529572 100644 (file)
@@ -25,7 +25,7 @@ import (
        "time"
 
        "github.com/dustin/go-humanize"
-       "go.cypherpunks.ru/nncp/v7/uilive"
+       "go.cypherpunks.ru/nncp/v8/uilive"
 )
 
 func init() {
@@ -114,6 +114,18 @@ func CopyProgressed(
                        break
                }
        }
+       if showPrgrs {
+               for _, le := range les {
+                       if le.K == "FullSize" {
+                               if le.V.(int64) == 0 {
+                                       Progress(prgrsPrefix, append(
+                                               les, LE{"Size", written}, LE{"FullSize", written},
+                                       ))
+                               }
+                               break
+                       }
+               }
+       }
        return
 }
 
@@ -146,7 +158,7 @@ func Progress(prefix string, les LEs) {
        }
        what = prefix + " " + what
        pb.Render(what, size)
-       if size >= fullsize {
+       if fullsize != 0 && size >= fullsize {
                pb.Kill()
                progressBarsLock.Lock()
                delete(progressBars, pkt)
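
Because stdin is no longer spooled through a temporary file, Tx may not know the final packet size, so CopyProgressed can now be called with FullSize equal to 0: the bar is only torn down once a non-zero full size is reached, and a final update setting FullSize to the written amount lets an unknown-size transfer finish cleanly. Below is a toy sketch of that behaviour; the names and rendering are illustrative, not NNCP's uilive-based output.

    package main

    import "fmt"

    // render stands in for Progress() above: with an unknown total (full == 0)
    // it can only report the bytes seen so far.
    func render(written, full int64) {
    	if full == 0 {
    		fmt.Printf("\rcopied %d bytes", written)
    		return
    	}
    	fmt.Printf("\rcopied %d / %d bytes (%d%%)", written, full, written*100/full)
    }

    // copyProgressed mimics the hunks above: progress is rendered per block, and
    // when the total was unknown a final update equating the total with the
    // written amount lets the bar finish.
    func copyProgressed(blocks []int64, full int64) {
    	var written int64
    	for _, n := range blocks {
    		written += n
    		render(written, full)
    	}
    	if full == 0 {
    		render(written, written) // as in CopyProgressed: FullSize := written
    	}
    	fmt.Println()
    }

    func main() {
    	copyProgressed([]int64{1 << 17, 1 << 17, 100}, 0)      // size unknown up front
    	copyProgressed([]int64{1 << 17, 1 << 17, 100}, 262244) // size known
    }
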
index 4037831b59618319fdee6083127d91b83af21622..0c27289e2cf55359ed8398f58d086c1b1eb97c88 100644 (file)
@@ -157,9 +157,12 @@ func jobProcess(
                        } else {
                                cmd.Stdin = pipeR
                        }
-                       output, err := cmd.Output()
+                       output, err := cmd.CombinedOutput()
                        if err != nil {
-                               ctx.LogE("rx-hande", les, err, func(les LEs) string {
+                               les = append(les, LE{"Output", strings.Split(
+                                       strings.Trim(string(output), "\n"), "\n"),
+                               })
+                               ctx.LogE("rx-handle", les, err, func(les LEs) string {
                                        return fmt.Sprintf(
                                                "Tossing exec %s/%s (%s): %s: handling",
                                                sender.Name, pktName,
@@ -616,11 +619,11 @@ func jobProcess(
                                if err != nil {
                                        panic(err)
                                }
-                               if _, err = ctx.Tx(
+                               if _, _, err = ctx.Tx(
                                        node,
                                        pktTrns,
                                        nice,
-                                       int64(pktSize), 0,
+                                       int64(pktSize), 0, MaxFileSize,
                                        pipeR,
                                        pktName,
                                        nil,
@@ -747,8 +750,14 @@ func jobProcess(
                                }
                                if nodeId != sender.Id && nodeId != pktEnc.Sender {
                                        ctx.LogI("rx-area-echo", lesEcho, logMsgNode)
-                                       if _, err = ctx.Tx(
-                                               node, &pkt, nice, int64(pktSize), 0, fullPipeR, pktName, nil,
+                                       if _, _, err = ctx.Tx(
+                                               node,
+                                               &pkt,
+                                               nice,
+                                               int64(pktSize), 0, MaxFileSize,
+                                               fullPipeR,
+                                               pktName,
+                                               nil,
                                        ); err != nil {
                                                ctx.LogE("rx-area", lesEcho, err, logMsgNode)
                                                return err
@@ -859,6 +868,7 @@ func jobProcess(
                                nil,
                        )
                        if err != nil {
+                               ctx.LogE("rx-area-pkt-enc-read2", les, err, logMsg)
                                pipeW.CloseWithError(err)
                                <-errs
                                return err
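
Besides fixing the rx-hande/rx-handle typo in the log label, the tosser now captures the handle command's combined stdout and stderr and attaches the trimmed lines to the error record. Below is a minimal sketch of that pattern with os/exec; the function names are illustrative, not NNCP's.

    package main

    import (
    	"fmt"
    	"os/exec"
    	"strings"
    )

    // runHandle shows the pattern from the hunk above: CombinedOutput captures
    // both stdout and stderr, and on failure the trimmed output lines are kept
    // alongside the error so the toss log explains what the handle printed.
    func runHandle(name string, args ...string) ([]string, error) {
    	output, err := exec.Command(name, args...).CombinedOutput()
    	if err != nil {
    		lines := strings.Split(strings.Trim(string(output), "\n"), "\n")
    		return lines, fmt.Errorf("handle %q failed: %w", name, err)
    	}
    	return nil, nil
    }

    func main() {
    	if lines, err := runHandle("sh", "-c", "echo oops >&2; exit 1"); err != nil {
    		fmt.Println(err)
    		fmt.Println("output:", lines)
    	}
    }
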
index deb70f13d04553e1688daecd80e08206b510e1ea..299e80f77d0166a13fe2097c7396a4c265600424 100644 (file)
@@ -97,8 +97,7 @@ func TestTossExec(t *testing.T) {
                                handle,
                                []string{"arg0", "arg1"},
                                strings.NewReader("BODY\n"),
-                               1<<15,
-                               false,
+                               1<<15, MaxFileSize,
                                false,
                                nil,
                        ); err != nil {
@@ -165,6 +164,10 @@ func TestTossFile(t *testing.T) {
                }
                files := make(map[string][]byte)
                for i, fileSize := range fileSizes {
+                       if fileSize == 0 {
+                               // to prevent chunked send
+                               fileSize++
+                       }
                        data := make([]byte, fileSize)
                        if _, err := io.ReadFull(rand.Reader, data); err != nil {
                                panic(err)
@@ -222,8 +225,10 @@ func TestTossFile(t *testing.T) {
                        return false
                }
                ctx.Neigh[*nodeOur.Id].Incoming = &incomingPath
-               ctx.Toss(ctx.Self.Id, TRx, DefaultNiceFile,
-                       false, false, false, false, false, false, false)
+               if ctx.Toss(ctx.Self.Id, TRx, DefaultNiceFile,
+                       false, false, false, false, false, false, false) {
+                       return false
+               }
                if len(dirFiles(rxPath)) != 0 {
                        return false
                }
@@ -347,6 +352,10 @@ func TestTossFreq(t *testing.T) {
                ctx.Neigh[*nodeOur.Id] = nodeOur.Their()
                files := make(map[string][]byte)
                for i, fileSize := range fileSizes {
+                       if fileSize == 0 {
+                               // to prevent chunked send
+                               fileSize++
+                       }
                        fileData := make([]byte, fileSize)
                        if _, err := io.ReadFull(rand.Reader, fileData); err != nil {
                                panic(err)
@@ -472,13 +481,12 @@ func TestTossTrns(t *testing.T) {
                        }
                        copy(pktTrans.Path[:], nodeOur.Id[:])
                        var dst bytes.Buffer
-                       if _, err := PktEncWrite(
+                       if _, _, err := PktEncWrite(
                                ctx.Self,
                                ctx.Neigh[*nodeOur.Id],
                                &pktTrans,
                                123,
-                               int64(len(data)),
-                               0,
+                               0, MaxFileSize, 1,
                                bytes.NewReader(data),
                                &dst,
                        ); err != nil {
index e096ed65f873ddeb48f9ac635a01bbeea8849ba2..0f7d2b8d9369627d4cbf52eb63b996b5874a22a5 100644 (file)
--- a/src/tx.go
+++ b/src/tx.go
@@ -21,12 +21,9 @@ import (
        "archive/tar"
        "bufio"
        "bytes"
-       "crypto/rand"
        "errors"
        "fmt"
-       "hash"
        "io"
-       "io/ioutil"
        "os"
        "path/filepath"
        "strconv"
@@ -37,7 +34,6 @@ import (
        "github.com/dustin/go-humanize"
        "github.com/klauspost/compress/zstd"
        "golang.org/x/crypto/blake2b"
-       "golang.org/x/crypto/chacha20poly1305"
 )
 
 const (
@@ -47,20 +43,26 @@ const (
        TarExt       = ".tar"
 )
 
+type PktEncWriteResult struct {
+       pktEncRaw []byte
+       size      int64
+       err       error
+}
+
 func (ctx *Ctx) Tx(
        node *Node,
        pkt *Pkt,
        nice uint8,
-       size, minSize int64,
+       srcSize, minSize, maxSize int64,
        src io.Reader,
        pktName string,
        areaId *AreaId,
-) (*Node, error) {
+) (*Node, int64, error) {
        var area *Area
        if areaId != nil {
                area = ctx.AreaId2Area[*areaId]
                if area.Prv == nil {
-                       return nil, errors.New("area has no encryption keys")
+                       return nil, 0, errors.New("area has no encryption keys")
                }
        }
        hops := make([]*Node, 0, 1+len(node.Via))
@@ -70,85 +72,76 @@ func (ctx *Ctx) Tx(
                lastNode = ctx.Neigh[*node.Via[i-1]]
                hops = append(hops, lastNode)
        }
-       expectedSize := size
        wrappers := len(hops)
        if area != nil {
                wrappers++
        }
-       for i := 0; i < wrappers; i++ {
-               expectedSize = PktEncOverhead +
-                       PktSizeOverhead +
-                       sizeWithTags(PktOverhead+expectedSize)
-       }
-       padSize := minSize - expectedSize
-       if padSize < 0 {
-               padSize = 0
-       }
-       if !ctx.IsEnoughSpace(size + padSize) {
-               return nil, errors.New("is not enough space")
+       var expectedSize int64
+       if srcSize > 0 {
+               expectedSize = srcSize + PktOverhead
+               expectedSize += sizePadCalc(expectedSize, minSize, wrappers)
+               expectedSize = PktEncOverhead + sizeWithTags(expectedSize)
+               if maxSize != 0 && expectedSize > maxSize {
+                       return nil, 0, TooBig
+               }
+               if !ctx.IsEnoughSpace(expectedSize) {
+                       return nil, 0, errors.New("is not enough space")
+               }
        }
        tmp, err := ctx.NewTmpFileWHash()
        if err != nil {
-               return nil, err
+               return nil, 0, err
        }
 
-       errs := make(chan error)
-       pktEncRaws := make(chan []byte)
-       curSize := size
+       results := make(chan PktEncWriteResult)
        pipeR, pipeW := io.Pipe()
        var pipeRPrev io.Reader
        if area == nil {
-               go func(size int64, src io.Reader, dst io.WriteCloser) {
+               go func(src io.Reader, dst io.WriteCloser) {
                        ctx.LogD("tx", LEs{
                                {"Node", hops[0].Id},
                                {"Nice", int(nice)},
-                               {"Size", size},
+                               {"Size", expectedSize},
                        }, func(les LEs) string {
                                return fmt.Sprintf(
-                                       "Tx packet to %s (%s) nice: %s",
+                                       "Tx packet to %s (source %s) nice: %s",
                                        ctx.NodeName(hops[0].Id),
-                                       humanize.IBytes(uint64(size)),
+                                       humanize.IBytes(uint64(expectedSize)),
                                        NicenessFmt(nice),
                                )
                        })
-                       pktEncRaw, err := PktEncWrite(
-                               ctx.Self, hops[0], pkt, nice, size, padSize, src, dst,
+                       pktEncRaw, size, err := PktEncWrite(
+                               ctx.Self, hops[0], pkt, nice, minSize, maxSize, wrappers, src, dst,
                        )
-                       pktEncRaws <- pktEncRaw
-                       errs <- err
+                       results <- PktEncWriteResult{pktEncRaw, size, err}
                        dst.Close()
-               }(curSize, src, pipeW)
-               curSize = PktEncOverhead + PktSizeOverhead + sizeWithTags(PktOverhead+curSize)
-               curSize += padSize
+               }(src, pipeW)
        } else {
-               go func(size, padSize int64, src io.Reader, dst io.WriteCloser) {
+               go func(src io.Reader, dst io.WriteCloser) {
                        ctx.LogD("tx", LEs{
                                {"Area", area.Id},
                                {"Nice", int(nice)},
-                               {"Size", size},
+                               {"Size", expectedSize},
                        }, func(les LEs) string {
                                return fmt.Sprintf(
-                                       "Tx area packet to %s (%s) nice: %s",
+                                       "Tx area packet to %s (source %s) nice: %s",
                                        ctx.AreaName(areaId),
-                                       humanize.IBytes(uint64(size)),
+                                       humanize.IBytes(uint64(expectedSize)),
                                        NicenessFmt(nice),
                                )
                        })
                        areaNode := Node{Id: new(NodeId), ExchPub: new([32]byte)}
                        copy(areaNode.Id[:], area.Id[:])
                        copy(areaNode.ExchPub[:], area.Pub[:])
-                       pktEncRaw, err := PktEncWrite(
-                               ctx.Self, &areaNode, pkt, nice, size, padSize, src, dst,
+                       pktEncRaw, size, err := PktEncWrite(
+                               ctx.Self, &areaNode, pkt, nice, 0, maxSize, 0, src, dst,
                        )
-                       pktEncRaws <- pktEncRaw
-                       errs <- err
+                       results <- PktEncWriteResult{pktEncRaw, size, err}
                        dst.Close()
-               }(curSize, padSize, src, pipeW)
-               curSize = PktEncOverhead + PktSizeOverhead + sizeWithTags(PktOverhead+curSize)
-               curSize += padSize
+               }(src, pipeW)
                pipeRPrev = pipeR
                pipeR, pipeW = io.Pipe()
-               go func(size int64, src io.Reader, dst io.WriteCloser) {
+               go func(src io.Reader, dst io.WriteCloser) {
                        pktArea, err := NewPkt(PktTypeArea, 0, area.Id[:])
                        if err != nil {
                                panic(err)
@@ -156,23 +149,21 @@ func (ctx *Ctx) Tx(
                        ctx.LogD("tx", LEs{
                                {"Node", hops[0].Id},
                                {"Nice", int(nice)},
-                               {"Size", size},
+                               {"Size", expectedSize},
                        }, func(les LEs) string {
                                return fmt.Sprintf(
-                                       "Tx packet to %s (%s) nice: %s",
+                                       "Tx packet to %s (source %s) nice: %s",
                                        ctx.NodeName(hops[0].Id),
-                                       humanize.IBytes(uint64(size)),
+                                       humanize.IBytes(uint64(expectedSize)),
                                        NicenessFmt(nice),
                                )
                        })
-                       pktEncRaw, err := PktEncWrite(
-                               ctx.Self, hops[0], pktArea, nice, size, 0, src, dst,
+                       pktEncRaw, size, err := PktEncWrite(
+                               ctx.Self, hops[0], pktArea, nice, minSize, maxSize, wrappers, src, dst,
                        )
-                       pktEncRaws <- pktEncRaw
-                       errs <- err
+                       results <- PktEncWriteResult{pktEncRaw, size, err}
                        dst.Close()
-               }(curSize, pipeRPrev, pipeW)
-               curSize = PktEncOverhead + PktSizeOverhead + sizeWithTags(PktOverhead+curSize)
+               }(pipeRPrev, pipeW)
        }
        for i := 1; i < len(hops); i++ {
                pktTrns, err := NewPkt(PktTypeTrns, 0, hops[i-1].Id[:])
@@ -181,54 +172,54 @@ func (ctx *Ctx) Tx(
                }
                pipeRPrev = pipeR
                pipeR, pipeW = io.Pipe()
-               go func(node *Node, pkt *Pkt, size int64, src io.Reader, dst io.WriteCloser) {
+               go func(node *Node, pkt *Pkt, src io.Reader, dst io.WriteCloser) {
                        ctx.LogD("tx", LEs{
                                {"Node", node.Id},
                                {"Nice", int(nice)},
-                               {"Size", size},
                        }, func(les LEs) string {
                                return fmt.Sprintf(
-                                       "Tx trns packet to %s (%s) nice: %s",
+                                       "Tx trns packet to %s nice: %s",
                                        ctx.NodeName(node.Id),
-                                       humanize.IBytes(uint64(size)),
                                        NicenessFmt(nice),
                                )
                        })
-                       pktEncRaw, err := PktEncWrite(ctx.Self, node, pkt, nice, size, 0, src, dst)
-                       pktEncRaws <- pktEncRaw
-                       errs <- err
+                       pktEncRaw, size, err := PktEncWrite(
+                               ctx.Self, node, pkt, nice, 0, MaxFileSize, 0, src, dst,
+                       )
+                       results <- PktEncWriteResult{pktEncRaw, size, err}
                        dst.Close()
-               }(hops[i], pktTrns, curSize, pipeRPrev, pipeW)
-               curSize = PktEncOverhead + PktSizeOverhead + sizeWithTags(PktOverhead+curSize)
+               }(hops[i], pktTrns, pipeRPrev, pipeW)
        }
        go func() {
                _, err := CopyProgressed(
                        tmp.W, pipeR, "Tx",
-                       LEs{{"Pkt", pktName}, {"FullSize", curSize}},
+                       LEs{{"Pkt", pktName}, {"FullSize", expectedSize}},
                        ctx.ShowPrgrs,
                )
-               errs <- err
+               results <- PktEncWriteResult{err: err}
        }()
        var pktEncRaw []byte
        var pktEncMsg []byte
        if area != nil {
-               pktEncMsg = <-pktEncRaws
-       }
-       for i := 0; i < len(hops); i++ {
-               pktEncRaw = <-pktEncRaws
+               pktEncMsg = (<-results).pktEncRaw
        }
+       var finalSize int64
        for i := 0; i <= wrappers; i++ {
-               err = <-errs
-               if err != nil {
+               r := <-results
+               if r.err != nil {
                        tmp.Fd.Close()
-                       return nil, err
+                       return nil, 0, err
+               }
+               if r.pktEncRaw != nil {
+                       finalSize = r.size
+                       pktEncRaw = r.pktEncRaw
                }
        }
        nodePath := filepath.Join(ctx.Spool, lastNode.Id.String())
        err = tmp.Commit(filepath.Join(nodePath, string(TTx)))
        os.Symlink(nodePath, filepath.Join(ctx.Spool, lastNode.Name))
        if err != nil {
-               return lastNode, err
+               return lastNode, 0, err
        }
        if ctx.HdrUsage {
                ctx.HdrWrite(pktEncRaw, filepath.Join(nodePath, string(TTx), tmp.Checksum()))
@@ -243,15 +234,15 @@ func (ctx *Ctx) Tx(
                les := LEs{
                        {"Node", node.Id},
                        {"Nice", int(nice)},
-                       {"Size", size},
+                       {"Size", expectedSize},
                        {"Area", areaId},
                        {"AreaMsg", msgHash},
                }
                logMsg := func(les LEs) string {
                        return fmt.Sprintf(
-                               "Tx area packet to %s (%s) nice: %s, area %s: %s",
+                               "Tx area packet to %s (source %s) nice: %s, area %s: %s",
                                ctx.NodeName(node.Id),
-                               humanize.IBytes(uint64(size)),
+                               humanize.IBytes(uint64(expectedSize)),
                                NicenessFmt(nice),
                                area.Name,
                                msgHash,
@@ -259,84 +250,34 @@ func (ctx *Ctx) Tx(
                }
                if err = ensureDir(seenDir); err != nil {
                        ctx.LogE("tx-mkdir", les, err, logMsg)
-                       return lastNode, err
+                       return lastNode, 0, err
                }
                if fd, err := os.Create(seenPath); err == nil {
                        fd.Close()
                        if err = DirSync(seenDir); err != nil {
                                ctx.LogE("tx-dirsync", les, err, logMsg)
-                               return lastNode, err
+                               return lastNode, 0, err
                        }
                }
                ctx.LogI("tx-area", les, logMsg)
        }
-       return lastNode, err
+       return lastNode, finalSize, err
 }
 
 type DummyCloser struct{}
 
 func (dc DummyCloser) Close() error { return nil }
 
-func throughTmpFile(r io.Reader) (
-       reader io.Reader,
-       closer io.Closer,
-       fileSize int64,
-       rerr error,
-) {
-       src, err := ioutil.TempFile("", "nncp-file")
-       if err != nil {
-               rerr = err
-               return
-       }
-       os.Remove(src.Name())
-       tmpW := bufio.NewWriter(src)
-       tmpKey := make([]byte, chacha20poly1305.KeySize)
-       if _, rerr = rand.Read(tmpKey[:]); rerr != nil {
-               return
-       }
-       aead, err := chacha20poly1305.New(tmpKey)
-       if err != nil {
-               rerr = err
-               return
-       }
-       nonce := make([]byte, aead.NonceSize())
-       written, err := aeadProcess(aead, nonce, nil, true, r, tmpW)
-       if err != nil {
-               rerr = err
-               return
-       }
-       fileSize = int64(written)
-       if err = tmpW.Flush(); err != nil {
-               rerr = err
-               return
-       }
-       if _, err = src.Seek(0, io.SeekStart); err != nil {
-               rerr = err
-               return
-       }
-       r, w := io.Pipe()
-       go func() {
-               for i := 0; i < aead.NonceSize(); i++ {
-                       nonce[i] = 0
-               }
-               if _, err := aeadProcess(aead, nonce, nil, false, bufio.NewReader(src), w); err != nil {
-                       w.CloseWithError(err)
-               }
-       }()
-       reader = r
-       closer = src
-       return
-}
-
 func prepareTxFile(srcPath string) (
        reader io.Reader,
        closer io.Closer,
-       fileSize int64,
+       srcSize int64,
        archived bool,
        rerr error,
 ) {
        if srcPath == "-" {
-               reader, closer, fileSize, rerr = throughTmpFile(bufio.NewReader(os.Stdin))
+               reader = os.Stdin
+               closer = os.Stdin
                return
        }
 
@@ -354,9 +295,9 @@ func prepareTxFile(srcPath string) (
                        rerr = err
                        return
                }
-               fileSize = srcStat.Size()
-               reader = bufio.NewReader(src)
+               reader = src
                closer = src
+               srcSize = srcStat.Size()
                return
        }
 
@@ -386,13 +327,13 @@ func prepareTxFile(srcPath string) (
                }
                if info.IsDir() {
                        // directory header, PAX record header+contents
-                       fileSize += TarBlockSize + 2*TarBlockSize
+                       srcSize += TarBlockSize + 2*TarBlockSize
                        dirs = append(dirs, einfo{path: path, modTime: info.ModTime()})
                } else {
                        // file header, PAX record header+contents, file content
-                       fileSize += TarBlockSize + 2*TarBlockSize + info.Size()
+                       srcSize += TarBlockSize + 2*TarBlockSize + info.Size()
                        if n := info.Size() % TarBlockSize; n != 0 {
-                               fileSize += TarBlockSize - n // padding
+                               srcSize += TarBlockSize - n // padding
                        }
                        files = append(files, einfo{
                                path:    path,
@@ -409,7 +350,7 @@ func prepareTxFile(srcPath string) (
        r, w := io.Pipe()
        reader = r
        closer = DummyCloser{}
-       fileSize += 2 * TarBlockSize // termination block
+       srcSize += 2 * TarBlockSize // termination block
 
        go func() error {
                tarWr := tar.NewWriter(w)
@@ -460,8 +401,7 @@ func (ctx *Ctx) TxFile(
        node *Node,
        nice uint8,
        srcPath, dstPath string,
-       chunkSize int64,
-       minSize, maxSize int64,
+       chunkSize, minSize, maxSize int64,
        areaId *AreaId,
 ) error {
        dstPathSpecified := false
@@ -477,39 +417,40 @@ func (ctx *Ctx) TxFile(
        if filepath.IsAbs(dstPath) {
                return errors.New("Relative destination path required")
        }
-       reader, closer, fileSize, archived, err := prepareTxFile(srcPath)
+       reader, closer, srcSize, archived, err := prepareTxFile(srcPath)
        if closer != nil {
                defer closer.Close()
        }
        if err != nil {
                return err
        }
-       if fileSize > maxSize {
-               return errors.New("Too big than allowed")
-       }
        if archived && !dstPathSpecified {
                dstPath += TarExt
        }
 
-       if fileSize <= chunkSize {
+       if chunkSize == 0 || (srcSize > 0 && srcSize <= chunkSize) {
                pkt, err := NewPkt(PktTypeFile, nice, []byte(dstPath))
                if err != nil {
                        return err
                }
-               _, err = ctx.Tx(node, pkt, nice, fileSize, minSize, reader, dstPath, areaId)
+               _, finalSize, err := ctx.Tx(
+                       node, pkt, nice,
+                       srcSize, minSize, maxSize,
+                       bufio.NewReader(reader), dstPath, areaId,
+               )
                les := LEs{
                        {"Type", "file"},
                        {"Node", node.Id},
                        {"Nice", int(nice)},
                        {"Src", srcPath},
                        {"Dst", dstPath},
-                       {"Size", fileSize},
+                       {"Size", finalSize},
                }
                logMsg := func(les LEs) string {
                        return fmt.Sprintf(
                                "File %s (%s) sent to %s:%s",
                                srcPath,
-                               humanize.IBytes(uint64(fileSize)),
+                               humanize.IBytes(uint64(finalSize)),
                                ctx.NodeName(node.Id),
                                dstPath,
                        )
@@ -522,57 +463,38 @@ func (ctx *Ctx) TxFile(
                return err
        }
 
-       leftSize := fileSize
-       metaPkt := ChunkedMeta{
-               Magic:     MagicNNCPMv2.B,
-               FileSize:  uint64(fileSize),
-               ChunkSize: uint64(chunkSize),
-               Checksums: make([][MTHSize]byte, 0, (fileSize/chunkSize)+1),
-       }
-       for i := int64(0); i < (fileSize/chunkSize)+1; i++ {
-               hsh := new([MTHSize]byte)
-               metaPkt.Checksums = append(metaPkt.Checksums, *hsh)
-       }
-       var sizeToSend int64
-       var hsh hash.Hash
-       var pkt *Pkt
+       br := bufio.NewReader(reader)
+       var sizeFull int64
        var chunkNum int
-       var path string
+       checksums := [][MTHSize]byte{}
        for {
-               if leftSize <= chunkSize {
-                       sizeToSend = leftSize
-               } else {
-                       sizeToSend = chunkSize
-               }
-               path = dstPath + ChunkedSuffixPart + strconv.Itoa(chunkNum)
-               pkt, err = NewPkt(PktTypeFile, nice, []byte(path))
+               lr := io.LimitReader(br, chunkSize)
+               path := dstPath + ChunkedSuffixPart + strconv.Itoa(chunkNum)
+               pkt, err := NewPkt(PktTypeFile, nice, []byte(path))
                if err != nil {
                        return err
                }
-               hsh = MTHNew(0, 0)
-               _, err = ctx.Tx(
-                       node,
-                       pkt,
-                       nice,
-                       sizeToSend,
-                       minSize,
-                       io.TeeReader(reader, hsh),
-                       path,
-                       areaId,
+               hsh := MTHNew(0, 0)
+               _, size, err := ctx.Tx(
+                       node, pkt, nice,
+                       0, minSize, maxSize,
+                       io.TeeReader(lr, hsh),
+                       path, areaId,
                )
+
                les := LEs{
                        {"Type", "file"},
                        {"Node", node.Id},
                        {"Nice", int(nice)},
                        {"Src", srcPath},
                        {"Dst", path},
-                       {"Size", sizeToSend},
+                       {"Size", size},
                }
                logMsg := func(les LEs) string {
                        return fmt.Sprintf(
                                "File %s (%s) sent to %s:%s",
                                srcPath,
-                               humanize.IBytes(uint64(sizeToSend)),
+                               humanize.IBytes(uint64(size)),
                                ctx.NodeName(node.Id),
                                path,
                        )
@@ -583,25 +505,44 @@ func (ctx *Ctx) TxFile(
                        ctx.LogE("tx", les, err, logMsg)
                        return err
                }
-               hsh.Sum(metaPkt.Checksums[chunkNum][:0])
-               leftSize -= sizeToSend
+
+               sizeFull += size - PktOverhead
+               var checksum [MTHSize]byte
+               hsh.Sum(checksum[:0])
+               checksums = append(checksums, checksum)
                chunkNum++
-               if leftSize == 0 {
+               if size < chunkSize {
+                       break
+               }
+               if _, err = br.Peek(1); err != nil {
                        break
                }
        }
-       var metaBuf bytes.Buffer
-       _, err = xdr.Marshal(&metaBuf, metaPkt)
+
+       metaPkt := ChunkedMeta{
+               Magic:     MagicNNCPMv2.B,
+               FileSize:  uint64(sizeFull),
+               ChunkSize: uint64(chunkSize),
+               Checksums: checksums,
+       }
+       var buf bytes.Buffer
+       _, err = xdr.Marshal(&buf, metaPkt)
        if err != nil {
                return err
        }
-       path = dstPath + ChunkedSuffixMeta
-       pkt, err = NewPkt(PktTypeFile, nice, []byte(path))
+       path := dstPath + ChunkedSuffixMeta
+       pkt, err := NewPkt(PktTypeFile, nice, []byte(path))
        if err != nil {
                return err
        }
-       metaPktSize := int64(metaBuf.Len())
-       _, err = ctx.Tx(node, pkt, nice, metaPktSize, minSize, &metaBuf, path, areaId)
+       metaPktSize := int64(buf.Len())
+       _, _, err = ctx.Tx(
+               node,
+               pkt,
+               nice,
+               metaPktSize, minSize, maxSize,
+               &buf, path, areaId,
+       )
        les := LEs{
                {"Type", "file"},
                {"Node", node.Id},
@@ -631,7 +572,8 @@ func (ctx *Ctx) TxFreq(
        node *Node,
        nice, replyNice uint8,
        srcPath, dstPath string,
-       minSize int64) error {
+       minSize int64,
+) error {
        dstPath = filepath.Clean(dstPath)
        if filepath.IsAbs(dstPath) {
                return errors.New("Relative destination path required")
@@ -646,7 +588,7 @@ func (ctx *Ctx) TxFreq(
        }
        src := strings.NewReader(dstPath)
        size := int64(src.Len())
-       _, err = ctx.Tx(node, pkt, nice, size, minSize, src, srcPath, nil)
+       _, _, err = ctx.Tx(node, pkt, nice, size, minSize, MaxFileSize, src, srcPath, nil)
        les := LEs{
                {"Type", "freq"},
                {"Node", node.Id},
@@ -676,8 +618,7 @@ func (ctx *Ctx) TxExec(
        handle string,
        args []string,
        in io.Reader,
-       minSize int64,
-       useTmp bool,
+       minSize int64, maxSize int64,
        noCompress bool,
        areaId *AreaId,
 ) error {
@@ -690,82 +631,34 @@ func (ctx *Ctx) TxExec(
        if noCompress {
                pktType = PktTypeExecFat
        }
-       pkt, rerr := NewPkt(pktType, replyNice, bytes.Join(path, []byte{0}))
-       if rerr != nil {
-               return rerr
-       }
-       var size int64
-
-       if !noCompress && !useTmp {
-               var compressed bytes.Buffer
-               compressor, err := zstd.NewWriter(
-                       &compressed,
-                       zstd.WithEncoderLevel(zstd.SpeedDefault),
-               )
-               if err != nil {
-                       return err
-               }
-               if _, err = io.Copy(compressor, in); err != nil {
-                       compressor.Close()
-                       return err
-               }
-               if err = compressor.Close(); err != nil {
-                       return err
-               }
-               size = int64(compressed.Len())
-               _, rerr = ctx.Tx(node, pkt, nice, size, minSize, &compressed, handle, areaId)
-       }
-       if noCompress && !useTmp {
-               var data bytes.Buffer
-               if _, err := io.Copy(&data, in); err != nil {
-                       return err
-               }
-               size = int64(data.Len())
-               _, rerr = ctx.Tx(node, pkt, nice, size, minSize, &data, handle, areaId)
+       pkt, err := NewPkt(pktType, replyNice, bytes.Join(path, []byte{0}))
+       if err != nil {
+               return err
        }
-       if !noCompress && useTmp {
-               r, w := io.Pipe()
-               compressor, err := zstd.NewWriter(w, zstd.WithEncoderLevel(zstd.SpeedDefault))
+       compressErr := make(chan error, 1)
+       if !noCompress {
+               pr, pw := io.Pipe()
+               compressor, err := zstd.NewWriter(pw, zstd.WithEncoderLevel(zstd.SpeedDefault))
                if err != nil {
                        return err
                }
-               copyErr := make(chan error)
-               go func() {
-                       _, err := io.Copy(compressor, in)
-                       if err != nil {
-                               compressor.Close()
-                               copyErr <- err
+               go func(r io.Reader) {
+                       if _, err := io.Copy(compressor, r); err != nil {
+                               compressErr <- err
+                               return
                        }
-                       err = compressor.Close()
-                       w.Close()
-                       copyErr <- err
-               }()
-               tmpReader, closer, fileSize, err := throughTmpFile(r)
-               if closer != nil {
-                       defer closer.Close()
-               }
-               if err != nil {
-                       return err
-               }
-               err = <-copyErr
-               if err != nil {
-                       return err
-               }
-               size = fileSize
-               _, rerr = ctx.Tx(node, pkt, nice, size, minSize, tmpReader, handle, areaId)
-       }
-       if noCompress && useTmp {
-               tmpReader, closer, fileSize, err := throughTmpFile(in)
-               if closer != nil {
-                       defer closer.Close()
-               }
-               if err != nil {
-                       return err
+                       compressErr <- compressor.Close()
+                       pw.Close()
+               }(in)
+               in = pr
+       }
+       _, size, err := ctx.Tx(node, pkt, nice, 0, minSize, maxSize, in, handle, areaId)
+       if !noCompress {
+               e := <-compressErr
+               if err == nil {
+                       err = e
                }
-               size = fileSize
-               _, rerr = ctx.Tx(node, pkt, nice, size, minSize, tmpReader, handle, areaId)
        }
-
        dst := strings.Join(append([]string{handle}, args...), " ")
        les := LEs{
                {"Type", "exec"},
@@ -781,12 +674,12 @@ func (ctx *Ctx) TxExec(
                        ctx.NodeName(node.Id), dst, humanize.IBytes(uint64(size)),
                )
        }
-       if rerr == nil {
+       if err == nil {
                ctx.LogI("tx", les, logMsg)
        } else {
-               ctx.LogE("tx", les, rerr, logMsg)
+               ctx.LogE("tx", les, err, logMsg)
        }
-       return rerr
+       return err
 }
 
 func (ctx *Ctx) TxTrns(node *Node, nice uint8, size int64, src io.Reader) error {
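
For illustration only, a minimal standalone sketch of the streaming pattern the rewritten TxExec relies on: the body is compressed through zstd into an io.Pipe, the transmitter reads the pipe, and the compressor's final status is collected over a buffered channel once transmission finishes. github.com/klauspost/compress/zstd is the same library the diff uses; io.Copy into io.Discard stands in for ctx.Tx, and the CloseWithError call on the error path is an extra safeguard of this sketch, not a line from the diff.

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var in io.Reader = strings.NewReader("body that would normally come from stdin")

	pr, pw := io.Pipe()
	compressor, err := zstd.NewWriter(pw, zstd.WithEncoderLevel(zstd.SpeedDefault))
	if err != nil {
		panic(err)
	}
	compressErr := make(chan error, 1)
	go func(r io.Reader) {
		// Feed the compressor and report its final status exactly once.
		if _, err := io.Copy(compressor, r); err != nil {
			compressErr <- err
			pw.CloseWithError(err) // unblock the reading side on failure
			return
		}
		compressErr <- compressor.Close()
		pw.Close()
	}(in)
	in = pr

	// Consume the compressed stream (ctx.Tx in the real code).
	n, err := io.Copy(io.Discard, in)
	if e := <-compressErr; err == nil {
		err = e
	}
	fmt.Println(n, err)
}
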
index 85a00db69a416bbd6ae19cb6e9e5b569751f026f..20d20ee0d1b7907b6272822e8b4b1a369d5dd1a5 100644 (file)
@@ -19,11 +19,11 @@ package nncp
 
 import (
        "bytes"
+       "crypto/rand"
        "io"
        "io/ioutil"
        "os"
        "path"
-       "strings"
        "testing"
        "testing/quick"
 
@@ -31,12 +31,23 @@ import (
 )
 
 func TestTx(t *testing.T) {
-       f := func(hops uint8, pathSrc, data string, nice, replyNice uint8, padSize int16) bool {
+       f := func(
+               hops uint8,
+               pathSrc string,
+               dataSize uint32,
+               nice, replyNice uint8,
+               minSize uint32,
+       ) bool {
+               dataSize %= 1 << 20
+               data := make([]byte, dataSize)
+               if _, err := io.ReadFull(rand.Reader, data); err != nil {
+                       panic(err)
+               }
+               minSize %= 1 << 20
                if len(pathSrc) > int(MaxPathSize) {
                        pathSrc = pathSrc[:MaxPathSize]
                }
                hops = hops % 4
-               hops = 1
                spool, err := ioutil.TempDir("", "testtx")
                if err != nil {
                        panic(err)
@@ -75,13 +86,14 @@ func TestTx(t *testing.T) {
                        nodeTgt.Via = append(nodeTgt.Via, node.Id)
                }
                pkt, err := NewPkt(PktTypeExec, replyNice, []byte(pathSrc))
-               src := strings.NewReader(data)
-               dstNode, err := ctx.Tx(
+               src := bytes.NewReader(data)
+               dstNode, _, err := ctx.Tx(
                        nodeTgt,
                        pkt,
                        123,
                        int64(src.Len()),
-                       int64(padSize),
+                       int64(minSize),
+                       MaxFileSize,
                        src,
                        "pktName",
                        nil,