[submodule "src/golang.org/x/net"]
path = src/golang.org/x/net
url = https://go.googlesource.com/net
+[submodule "src/github.com/gorhill/cronexpr"]
+ path = src/github.com/gorhill/cronexpr
+ url = https://github.com/gorhill/cronexpr.git
--- /dev/null
+doc/news.texi
\ No newline at end of file
--- /dev/null
+There are people deserving to be thanked for helping this project:
+
+* Shawn K. Quinn <skquinn at rushpost dot com> for his descriptive
+ instructions about building NNCP under Ubuntu GNU/Linux distributions
+ and bug reports
-.PHONY: doc
+PREFIX ?= /usr/local
-CFGPATH ?= /usr/local/etc/nncp.yaml
SENDMAIL ?= /usr/sbin/sendmail
-LDFLAGS = \
- -X cypherpunks.ru/nncp.Version=$(VERSION) \
- -X cypherpunks.ru/nncp.DefaultCfgPath=$(CFGPATH) \
- -X cypherpunks.ru/nncp.DefaultSendmailPath=$(SENDMAIL)
-PREFIX ?= /usr/local
+CFGPATH ?= $(PREFIX)/etc/nncp.yaml
+SPOOLPATH ?= /var/spool/nncp
+LOGPATH ?= /var/spool/nncp/log
+
BINDIR = $(DESTDIR)$(PREFIX)/bin
INFODIR = $(DESTDIR)$(PREFIX)/info
DOCDIR = $(DESTDIR)$(PREFIX)/share/doc/nncp
+
+LDFLAGS = \
+ -X cypherpunks.ru/nncp.Version=$(VERSION) \
+ -X cypherpunks.ru/nncp.DefaultCfgPath=$(CFGPATH) \
+ -X cypherpunks.ru/nncp.DefaultSendmailPath=$(SENDMAIL) \
+ -X cypherpunks.ru/nncp.DefaultSpoolPath=$(SPOOLPATH) \
+ -X cypherpunks.ru/nncp.DefaultLogPath=$(LOGPATH)
+
ALL = \
nncp-mail \
nncp-call \
+ nncp-caller \
nncp-check \
nncp-daemon \
nncp-file \
nncp-call:
GOPATH=$(GOPATH) go build -ldflags "$(LDFLAGS)" cypherpunks.ru/nncp/cmd/nncp-call
+nncp-caller:
+ GOPATH=$(GOPATH) go build -ldflags "$(LDFLAGS)" cypherpunks.ru/nncp/cmd/nncp-caller
+
nncp-check:
GOPATH=$(GOPATH) go build -ldflags "$(LDFLAGS)" cypherpunks.ru/nncp/cmd/nncp-check
clean:
rm -f $(ALL)
+.PHONY: doc
+
doc:
$(MAKE) -C doc
cp -f doc/nncp.info $(INFODIR)
chmod 644 $(INFODIR)/nncp.info
mkdir -p $(DOCDIR)
- cp -f -L AUTHORS README $(DOCDIR)
+ cp -f -L AUTHORS NEWS README THANKS $(DOCDIR)
chmod 644 $(DOCDIR)/*
install-strip: install
--- /dev/null
+@node Call
+@unnumbered Call configuration
+
+Call is a rule describing when and how a node can be called.
+
+Example list of call structures:
+
+@verbatim
+calls:
+ -
+ cron: "*/1 * * * 0-4"
+ onlinedeadline: 3600
+ nice: 64
+ -
+ cron: "30 * * * 5-6"
+ onlinedeadline: 1800
+ maxonlinetime: 1750
+ nice: 64
+ -
+ cron: "0 * * * 5-6"
+ xx: rx
+ addr: lan
+@end verbatim
+
+tells that on work days of the week call that node every minute,
+disconnect after an hour of inactivity and process only relatively high
+priority packets (presumably mail ones). So we connect and hold
+connection for very long time to pass only emails. On weekends call that
+node only each half-hour for processing high-priority packets. Also only
+on weekends try to connect to that node every hour only using LAN
+address and only receiving any (any priority) packets (assume that low
+priority huge file transmissions are done additionally via offline
+connections).
+
+It contains the following fields (only @emph{cron} is required):
+
+@table @emph
+
+@item cron
+This is copy-pasted documentation from
+@code{github.com/gorhill/cronexpr} library used there.
+
+@multitable @columnfractions .2 .1 .2 .5
+@headitem Field name @tab Mandatory? @tab Allowed values @tab Allowed special characters
+
+@item Seconds @tab No @tab 0-59 @tab @verb{|* / , -|}
+@item Minutes @tab Yes @tab 0-59 @tab @verb{|* / , -|}
+@item Hours @tab Yes @tab 0-23 @tab @verb{|* / , -|}
+@item Day of month @tab Yes @tab 1-31 @tab @verb{|* / , - L W|}
+@item Month @tab Yes @tab 1-12 or JAN-DEC @tab @verb{|* / , -|}
+@item Day of week @tab Yes @tab 0-6 or SUN-SAT @tab @verb{|* / , - L #|}
+@item Year @tab No @tab 1970-2099 @tab @verb{|* / , -|}
+
+@end multitable
+
+@table @asis
+
+@item Asterisk (@verb{|*|})
+
+The asterisk indicates that the cron expression matches for all values
+of the field. E.g., using an asterisk in the 4th field (month) indicates
+every month.
+
+@item Slash (@verb{|/|})
+
+Slashes describe increments of ranges. For example @verb{|3-59/15|} in
+the minute field indicate the third minute of the hour and every 15
+minutes thereafter. The form @verb{|*/...|} is equivalent to the form
+"first-last/...", that is, an increment over the largest possible range
+of the field.
+
+@item Comma (@verb{|,|})
+
+Commas are used to separate items of a list. For example, using
+@verb{|MON,WED,FRI|} in the 5th field (day of week) means Mondays,
+Wednesdays and Fridays.
+
+@item Hyphen (@verb{|-|})
+
+Hyphens define ranges. For example, 2000-2010 indicates every year
+between 2000 and 2010 AD, inclusive.
+
+@item L
+
+@verb{|L|} stands for "last". When used in the day-of-week field, it
+allows you to specify constructs such as "the last Friday" (@verb{|5L|})
+of a given month. In the day-of-month field, it specifies the last day
+of the month.
+
+@item W
+
+The @verb{|W|} character is allowed for the day-of-month field. This
+character is used to specify the business day (Monday-Friday) nearest
+the given day. As an example, if you were to specify @verb{|15W|} as the
+value for the day-of-month field, the meaning is: "the nearest business
+day to the 15th of the month."
+
+So, if the 15th is a Saturday, the trigger fires on Friday the 14th. If
+the 15th is a Sunday, the trigger fires on Monday the 16th. If the 15th
+is a Tuesday, then it fires on Tuesday the 15th. However if you specify
+@verb{|1W|} as the value for day-of-month, and the 1st is a Saturday,
+the trigger fires on Monday the 3rd, as it does not 'jump' over the
+boundary of a month's days.
+
+The @verb{|W|} character can be specified only when the day-of-month is
+a single day, not a range or list of days.
+
+The @verb{|W|} character can also be combined with @verb{|L|}, i.e.
+@verb{|LW|} to mean "the last business day of the month."
+
+@item Hash (@verb{|#|})
+
+@verb{|#|} is allowed for the day-of-week field, and must be followed by
+a number between one and five. It allows you to specify constructs such
+as "the second Friday" of a given month.
+
+@end table
+
+Predefined cron expressions:
+
+@multitable @columnfractions .1 .75 .15
+@headitem Entry @tab Description @tab Equivalent to
+@item @verb{|@annually|} @tab
+ Run once a year at midnight in the morning of January 1 @tab
+ @verb{|0 0 0 1 1 * *|}
+@item @verb{|@yearly|} @tab
+ Run once a year at midnight in the morning of January 1 @tab
+ @verb{|0 0 0 1 1 * *|}
+@item @verb{|@monthly|} @tab
+ Run once a month at midnight in the morning of the first of the month @tab
+ @verb{|0 0 0 1 * * *|}
+@item @verb{|@weekly|} @tab
+ Run once a week at midnight in the morning of Sunday @tab
+ @verb{|0 0 0 * * 0 *|}
+@item @verb{|@daily|} @tab
+ Run once a day at midnight @tab
+ @verb{|0 0 0 * * * *|}
+@item @verb{|@hourly|} @tab
+ Run once an hour at the beginning of the hour @tab
+ @verb{|0 0 * * * * *|}
+@end multitable
+
+@itemize
+@item
+If only six fields are present, a @verb{|0|} second field is prepended,
+that is, @verb{|* * * * * 2013|} internally becomes
+@verb{|0 * * * * * 2013|}.
+@item
+If only five fields are present, a @verb{|0|} second field is prepended
+and a wildcard year field is appended, that is, @verb{|* * * * Mon|}
+internally becomes @verb{|0 * * * * Mon *|}.
+@item
+Domain for day-of-week field is [0-7] instead of [0-6], 7 being Sunday
+(like 0). This to comply with @url{http://linux.die.net/man/5/crontab}.
+@end itemize
+
+@item nice
+Optional. Use that @ref{Niceness, niceness} during the call (255 is used
+otherwise).
+
+@item xx
+Optional. Either @verb{|rx|} or @verb{|tx|}. Tells only to either
+receive or transmit data during that call.
+
+@item addr
+Optional. Call only that address, instead of trying all from
+@ref{CfgAddrs, @emph{addrs}} configuration option. It can be either key
+from @emph{addrs} dictionary, or an ordinary @option{addr:port}.
+
+@item onlinedeadline
+Optional. Override @ref{CfgOnlineDeadline, @emph{onlinedeadline}}
+configuration option when calling.
+
+@item maxonlinetime
+Optional. Override @ref{CfgMaxOnlineTime, @emph{maxonlinetime}}
+configuration option when calling.
+
+@end table
noisepub: UBM5K...VI42A
sendmail: ["/bin/sh", "-c", "false"]
incoming: /home/alice/incoming
+ onlinedeadline: 1800
+ maxonlinetime: 3600
addrs:
lan: "[fe80::1234%igb0]:5400"
internet: alice.com:3389
+ calls:
+ -
+ cron: "*/2 * * * *"
bob:
id: 2IZNP...UYGYA
exchpub: WFLMZ...B7NHA
signpub: GTGXG...IE3OA
- noisepub: EQAZM...J3NBA
sendmail: [/usr/sbin/sendmail]
freq: /home/bob/pub
via: [alice]
directory. @strong{log} field contains an absolute path to @ref{Log,
log} file.
+@anchor{CfgNotify}
@strong{notify} section contains notification settings for successfully
tossed file and freq packets. Corresponding @strong{from} and
-@strong{to} fields will substituted in notification email message.
-@emph{neigh/self/sendmail} will be used as a local mailer. If either of
-@emph{from}/@emph{to} fields are omitted, then no notification will be
-sent.
+@strong{to} fields will be substituted in notification email message.
+@emph{neigh/self/sendmail} will be used as a local mailer. You can omit
+either of those two @emph{from}/@emph{to} sections to omit corresponding
+notifications, or the whole section at once.
@strong{self} section contains our node's private keypairs.
@strong{exch*} and @strong{sign*} are used during @ref{Encrypted,
node has the following fields:
@table @strong
+
@item noisepub
-Must be present, but can be dummy (only zeros) if no online
-communication using @ref{Sync, synchronization protocol} will be used.
+If present, then node can be online called using @ref{Sync,
+synchronization protocol}. Contains authentication public key.
+@anchor{CfgSendmail}
@item sendmail
An array containing path to executable and its command line arguments
-that is called for mail sending.
+that is called for mail sending. If it is empty, then no mail processing
+will be performed from that node.
+@anchor{CfgIncoming}
@item incoming
Full path to directory where all file uploads will be saved. May be
omitted to forbid file uploading on that node.
+@anchor{CfgFreq}
@item freq
Full path to directory from where file requests will queue files for
transmission. May be omitted to forbid freqing from that node.
@item via
An array of node identifiers that will be used as a relay to that node.
-For example @code{[foo,bar]} means that packet can reach current node by
-transitioning through @code{foo} and then @code{bar} nodes. May be
+For example @verb{|[foo,bar]|} means that packet can reach current node
+by transitioning through @emph{foo} and then @emph{bar} nodes. May be
omitted if direct connection exists and no relaying is required.
+@anchor{CfgAddrs}
@item addrs
Dictionary containing known network addresses of the node. Each key is
-human-readable name of the link/address. Values are @code{addr:port}
+human-readable name of the link/address. Values are @verb{|addr:port|}
pairs pointing to @ref{nncp-daemon}'s listening instance. May be omitted
if either no direct connection exists, or @ref{nncp-call} is used with
forced address specifying.
+
+@anchor{CfgOnlineDeadline}
+@item onlinedeadline
+Online connection deadline of node inactivity in seconds. It is the
+time after which the connection is considered dead when no packets are
+received or sent and the node must disconnect. By default it is set to
+10 seconds -- that means disconnecting after 10 seconds when no packets
+are received or transmitted. This can be set to rather high values to
+keep the connection alive (to reduce handshake overhead and delays),
+wait for packets ready to send and notify the remote side about their
+appearance.
+
+@anchor{CfgMaxOnlineTime}
+@item maxonlinetime
+If greater than zero, then it is the maximal amount of time the
+connection can be alive. Forcefully disconnect if it is exceeded.
+
+@anchor{CfgCalls}
+@item calls
+List of @ref{Call, call configuration}s. Can be omitted if
+@ref{nncp-caller} won't be used to call that node.
+
@end table
Nearly all commands have the following common options:
-@table @code
+@table @option
+@item -cfg
+ Path to configuration file. May be overridden by @env{NNCPCFG}
+ environment variable.
@item -debug
Print debug messages. Normally this option should not be used.
+@item -minsize
+ Minimal required resulting packet size. For example if you send 2
+ KiB file and set @option{-minsize 4096}, then resulting packet will
+ be 4 KiB (containing file itself and some junk).
@item -nice
- Set desired outgoing packet niceness level. 1-255 values are
- allowed. Higher value means lower priority. In some commands that
- means processing of packets that have equal or lower nice value.
- That is used for controlling network QoS.
+ Set desired outgoing packet @ref{Niceness, niceness level}.
+ 1-255 values are allowed.
@item -node
Process only single specified node.
@item -quiet
@section nncp-call
@verbatim
-% nncp-call [options] [-rx|-tx] NODE[:ADDR] [FORCEADDR]
+% nncp-call [options] [-onlinedeadline INT] [-maxonlinetime INT] [-rx|-tx]
+ NODE[:ADDR] [FORCEADDR]
@end verbatim
-Call (connect to) specified @code{NODE} and run @ref{Sync,
+Call (connect to) specified @option{NODE} and run @ref{Sync,
synchronization} protocol with the @ref{nncp-daemon, daemon} on the
remote side. Normally this command could be run any time you wish to
either check for incoming packets, or to send out queued ones.
Synchronization protocol allows resuming and bidirectional packets
transfer.
-If @code{-rx} option is specified then only inbound packets transmission
-is performed. If @code{-tx} option is specified, then only outbound
-transmission is performed.
+If @option{-rx} option is specified then only inbound packets
+transmission is performed. If @option{-tx} option is specified, then
+only outbound transmission is performed. @option{-onlinedeadline}
+overrides @ref{CfgOnlineDeadline, @emph{onlinedeadline}}.
+@option{-maxonlinetime} overrides @ref{CfgMaxOnlineTime,
+@emph{maxonlinetime}}.
-Each @code{NODE} can contain several uniquely identified
-@code{ADDR}esses in @ref{Configuration, configuration} file. If you do
+@node nncp-caller
+@section nncp-caller
+
+@verbatim
+% nncp-caller [options] [NODE ...]
+@end verbatim
+
+Croned daemon that calls remote nodes from time to time, according to
+their @ref{CfgCalls, @emph{calls}} configuration field.
+
+An optional list of @option{NODE}s tells it to ignore the other ones.
+Otherwise all nodes with the @emph{calls} configuration field
+specified will be called.
+
+@option{-onlinedeadline} overrides @ref{CfgOnlineDeadline,
+@emph{onlinedeadline}} configuration option.
+
+Each @option{NODE} can contain several uniquely identified
+@option{ADDR}esses in @ref{CfgAddrs, configuration} file. If you do
not specify the exact one, then all will be tried until the first
-success. Optionally you can force @code{FORCEADDR} address usage,
+success. Optionally you can force @option{FORCEADDR} address usage,
instead of addresses taken from configuration file.
-Pay attention that this command run integrity check for each completely
-received packet in the background. This can be time consuming and
-connection could be lost during that check time and remote node won't be
+Pay attention that this command runs integrity check for each completely
+received packet in the background. This can be time consuming.
+Connection could be lost during that check and remote node won't be
notified that file is done. But after successful integrity check that
-file will be renamed from @code{.part} one and when you rerun
-@code{nncp-call} again, remote node will receive completion at once.
+file is renamed from @file{.part} one and when you rerun
+@command{nncp-call} again, remote node will receive completion
+notification.
@node nncp-check
@section nncp-check
Perform @ref{Spool, spool} directory integrity check. Read all files
that have Base32-encoded filenames and compare them with recalculated
-BLAKE2b hash output of their contents. This supplementary command are
+BLAKE2b hash output of their contents. This supplementary command is
not used often in practice, if ever.
@node nncp-daemon
@ref{nncp-toss} utility in background to process inbound packets from
time to time.
-@code{-maxconn} option specifies how many simultaneous clients daemon
-can handle. @code{-bind} option specifies @code{addr:port} it must bind
-to and listen.
+@option{-maxconn} option specifies how many simultaneous clients daemon
+can handle. @option{-bind} option specifies @option{addr:port} it must
+bind to and listen.
@node nncp-file
@section nncp-file
% nncp-file [options] SRC NODE:[DST]
@end verbatim
-Send @code{SRC} file to remote @code{NODE}. @code{DST} specifies
-destination file name in remote's @ref{Configuration, incoming}
+Send @file{SRC} file to remote @option{NODE}. @file{DST} specifies
+destination file name in remote's @ref{CfgIncoming, incoming}
directory. If this file already exists there, then a counter will be
appended to it.
(through the temporary file of course) -- so pay attention that sending
2 GiB file will create 2 GiB outbound encrypted packet.
-If @ref{Configuration, notification} is enabled on the remote side for
+If @ref{CfgNotify, notification} is enabled on the remote side for
file transmissions, then it will send a simple letter after successful
file receiving.
% nncp-freq [options] NODE:SRC DST
@end verbatim
-Send file request to @code{NODE}, asking it to send its @code{SRC} file
-from @ref{Configuration, freq} directory to our node under @code{DST}
-filename in our @ref{Configuration, incoming} one.
+Send file request to @option{NODE}, asking it to send its @file{SRC}
+file from @ref{CfgFreq, freq} directory to our node under @file{DST}
+filename in our @ref{CfgIncoming, incoming} one.
-If @ref{Configuration, notification} is enabled on the remote side for
+If @ref{CfgNotify, notification} is enabled on the remote side for
file request, then it will send a simple letter after successful file
queuing.
% nncp-mail [options] NODE USER ...
@end verbatim
-Send mail, that is read from stdin, to @code{NODE} and specified
-@code{USER}s. Mail message will be compressed. After receiving, remote
-side will execute specified @ref{Configuration, sendmail} command with
-@code{USER}s appended as a command line argument and feed decompressed
+Send mail, that is read from stdin, to @option{NODE} and specified
+@option{USER}s. Mail message will be compressed. After receiving, remote
+side will execute specified @ref{CfgSendmail, sendmail} command with
+@option{USER}s appended as a command line argument and feed decompressed
mail body to that command's stdin.
@node nncp-newnode
@verbatim
% nncp-pkt [options] < pkt
-% nncp-pkt [options] -dump < pkt > payload
+% nncp-pkt [options] [-decompress] -dump < pkt > payload
@end verbatim
Low level packet parser. Normally it should not be used, but can help in
Packet type: encrypted
Niceness: 64
Sender: 2WHBV3TPZHDOZGUJEH563ZEK7M33J4UESRFO4PDKWD5KZNPROABQ
-Payload size: 4.0 MiB (4162852 bytes)
@end verbatim
-If you specify @code{-dump} option and provide an @ref{Encrypted,
+If you specify @option{-dump} option and provide an @ref{Encrypted,
encrypted} packet, then it will verify and decrypt it to stdout.
Encrypted packets contain @ref{Plain, plain} ones, that also can be fed
-to @code{nncp-pkt}:
+to @command{nncp-pkt}:
@verbatim
Packet type: plain
Path: stargrave@stargrave.org
@end verbatim
-And with the @code{-dump} option it will give you the actual payload
-(the whole file, mail message, and so on).
+And with the @option{-dump} option it will give you the actual payload
+(the whole file, mail message, and so on). @option{-decompress} option
+tries to zlib-decompress the data from plain packet (useful for mail
+packets).
@node nncp-stat
@section nncp-stat
@section nncp-toss
@verbatim
-% nncp-toss [options] [-dryrun]
+% nncp-toss [options] [-dryrun] [-cycle INT]
@end verbatim
Perform "tossing" operation on all inbound packets. This is the tool
that copies files, sends mails, sends out file requests and relays transition
packets. It should be run after each online/offline exchange.
-@code{-dryrun} option does not perform any writing and sending, just
+@option{-dryrun} option does not perform any writing and sending, just
tells what it will do.
+@option{-cycle} option tells not to quit, but to repeat tossing every
+@option{INT} seconds in an infinite loop. That can be useful when
+running this command as a daemon.
+
@node nncp-xfer
@section nncp-xfer
% nncp-xfer [options] [-force] [-keep] [-rx|-tx] DIR
@end verbatim
-Search for directory in @code{DIR} containing inbound packets for us and
+Search for directory in @file{DIR} containing inbound packets for us and
move them to local @ref{Spool, spool} directory. Also search for known
neighbours directories and move locally queued outbound packets to them.
This command is used for offline packets transmission.
-If @code{-force} option is specified, then outbound neighbour(s)
+If @option{-force} option is specified, then outbound neighbour(s)
directories will be created. This is useful for the first time usage,
when storage device does not have any directories tree.
-If @code{-keep} option is specified, then keep copied files, do not
+If @option{-keep} option is specified, then keep copied files, do not
remove them.
-@code{-rx} option tells only to move inbound packets addressed to us.
-@code{-tx} option tells exactly the opposite: move only outbound packets.
+@option{-rx} option tells only to move inbound packets addressed to us.
+@option{-tx} option tells exactly the opposite: move only outbound packets.
-@code{DIR} directory has the following structure:
-@code{RECIPIENT/SENDER/PACKET}, where @code{RECIPIENT} is Base32 encoded
-destination node, @code{SENDER} is Base32 encoded sender node.
+@file{DIR} directory has the following structure:
+@file{RECIPIENT/SENDER/PACKET}, where @file{RECIPIENT} is Base32 encoded
+destination node, @file{SENDER} is Base32 encoded sender node.
(or use @url{https://sourceforge.net/projects/nncp/files/, Sourceforge mirror}).
Do not forget to check tarball @ref{Integrity, integrity}.
+Tarballs include all necessary required libraries:
+
+@multitable @columnfractions .50 .50
+@headitem Library @tab Licence
+@item @code{github.com/dustin/go-humanize} @tab MIT
+@item @code{github.com/flynn/noise} @tab BSD 3-Clause
+@item @code{github.com/go-check/check} @tab BSD 2-Clause
+@item @code{github.com/go-yaml/yaml} @tab Apache License 2.0 and MIT
+@item @code{github.com/gorhill/cronexpr} @tab GPLv3
+@item @code{github.com/minio/blake2b-simd} @tab Apache License 2.0
+@item @code{golang.org/x/crypto} @tab BSD 3-Clause
+@item @code{golang.org/x/net} @tab BSD 3-Clause
+@item @code{golang.org/x/sys} @tab BSD 3-Clause
+@end multitable
+
@multitable {XXXXX} {XXXX KiB} {link sign} {xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx}
@headitem Version @tab Size @tab Tarball @tab SHA256 checksum
+@item 0.1 @tab 720 KiB
+@tab @url{download/nncp-0.1.tar.xz, link} @url{download/nncp-0.1.tar.xz.sig, sign}
+@tab @code{8F71D65B 70865EBF FE802CDF A5C14D00 A9FD6559 FD722E60 5D97E82C 5E2412C2}
+
@end multitable
Simplicity, cryptographic security, sneakernet compatibility and easy
integration with current SMTP servers are the reasons.
+@center Interested? @ref{Tarballs, @strong{Download it}}!
+
@menu
* Comparison::
* Use cases::
* Workflow::
+* News::
* Installation::
* Configuration::
+* Call configuration: Call.
+* Integration with Postfix: Postfix.
* Commands::
* Niceness::
* Spool directory: Spool.
* Log format: Log.
* Packet format: Packet.
* Sync protocol: Sync.
+* Thanks::
* Contacts and feedback: Contacts.
* Copying conditions: Copying.
@end menu
@include comparison.texi
@include usecases.texi
@include workflow.texi
+@include news.texi
@include install.texi
@include cfg.texi
+@include call.texi
+@include postfix.texi
@include cmds.texi
@include niceness.texi
@include spool.texi
@include log.texi
@include pkt.texi
@include sp.texi
+@include thanks.texi
@include contacts.texi
@node Copying
convenient building. @url{https://www.gnu.org/software/texinfo/,
Texinfo} is used for building documentation.
-Included required libraries:
-
-@multitable @columnfractions .50 .50
-@headitem Library @tab Licence
-@item @code{github.com/dustin/go-humanize} @tab MIT
-@item @code{github.com/flynn/noise} @tab BSD 3-Clause
-@item @code{github.com/go-check/check} @tab BSD 2-Clause
-@item @code{github.com/go-yaml/yaml} @tab Apache License 2.0 and MIT
-@item @code{github.com/minio/blake2b-simd} @tab Apache License 2.0
-@item @code{golang.org/x/crypto} @tab BSD 3-Clause
-@item @code{golang.org/x/net} @tab BSD 3-Clause
-@item @code{golang.org/x/sys} @tab BSD 3-Clause
-@end multitable
-
-Get @ref{Tarballs, the tarball}, check its
+In general you must get @ref{Tarballs, the tarball}, check its
@ref{Integrity, integrity and authenticity} and run @command{make}.
-@emph{nncp-*} binaries will be built in the current directory:
-
-@verbatim
-% wget http://www.nncpgo.org/download/nncp-0.1.tar.xz
-% wget http://www.nncpgo.org/download/nncp-0.1.tar.xz.sig
-% gpg --verify nncp-0.1.tar.xz.sig nncp-0.1.tar.xz
-% tar xf nncp-0.1.tar.xz
-% make -C nncp-0.1 all
-@end verbatim
-
-There is @code{install} target respecting @env{DESTDIR}. It will
-install binaries and info-documentation.
+Look for general and @ref{Platform-specific, platform-specific}
+installation instructions.
@menu
* Prepared tarballs: Tarballs.
* Tarballs integrity check: Integrity.
+* Platform-specific instructions: Platform-specific.
* Development source code: Sources.
@end menu
@include download.texi
@include integrity.texi
+@include platforms.texi
@include sources.texi
I 2017-01-09T08:48:59.264847401Z [call-finish duration="10" node="BYRRQUULEHINPKEFN7CHMSHR5I5CK7PMX5HQNCYERTBAR4BOCG6Q" rxbytes="60" rxspeed="60" txbytes="108" txspeed="108"]
@end verbatim
-@table @code
+@table @emph
@item |
Space character.
@item LEVEL
- Is single character log level. As a rule is is either @code{I}
- (informational message), or @code{E} (error message).
+ Is a single character log level. As a rule it is either @verb{|I|}
+ (informational message), or @verb{|E|} (error message).
@item DATETIME
- UTC datetime in RFC 3339 @code{2006-01-02T15:04:05.999999999Z} format.
+ UTC datetime in RFC 3339 @verb{|2006-01-02T15:04:05.999999999Z|} format.
@item SD
Structured data as in RFC 5424.
@item MSG
--- /dev/null
+@node News
+@unnumbered News
+
+@node Release 0.2
+@section Release 0.2
+@itemize
+@item @strong{Incompatible} packet's format change (magic number is
+changed too): size field is encrypted and is not sent in plaintext
+anymore.
+@item @option{-minsize} option gives ability to automatically pad
+outgoing packets to specified minimal size.
+@item @ref{nncp-daemon} and @ref{nncp-call}/@ref{nncp-caller} always
+check new @emph{tx} packets appearance in the background while
+connected. Remote side is immediately notified.
+@item @option{-onlinedeadline} option gives ability to configure timeout
+of inactivity of online connection, when it could be disconnected. It
+could be used to keep connection alive for a long time.
+@item @option{-maxonlinetime} option gives ability to set maximal
+allowable online connection aliveness time.
+@item @ref{nncp-caller} command appeared: cron-ed TCP daemon caller.
+@item @ref{nncp-pkt} command can decompress the data.
+@end itemize
@node Niceness
@unnumbered Niceness
-Each transmitted packet has niceness level, as Unix has @code{nice}
+Each transmitted packet has niceness level, as Unix has @command{nice}
command for controlling processes priority. Higher nicer level means
that the packet is "nicer" and allows others to bypass it -- that means
lower transmission precedence.
@headitem @tab XDR type @tab Value
@item Magic number @tab
8-byte, fixed length opaque data @tab
- @code{NNCPP0x10x00x00}
+ @verb{|N N C P P 0x00 0x00 0x01|}
@item Payload type @tab
unsigned integer @tab
0 (file), 1 (freq), 2 (mail), 3 (transition)
@item Path length @tab
unsigned integer @tab
- actual length of following field's payload
+ actual length of @emph{path} field's payload
@item Path @tab
255 byte, fixed length opaque data @tab
@itemize
@item UTF-8 encoded destination path for file transfer
@item UTF-8 encoded source path for file request
@item UTF-8 encoded, space separated, email recipients list
- @item Node id the transition packet must be relayed on
+ @item Node's id the transition packet must be relayed on
@end itemize
@end multitable
Each encrypted packet has the following header:
@verbatim
- HEADER
-+--------------------------------------------+-------...--------+
-| MAGIC | NICE | SENDER | EPUB | SIGN | SIZE | CIPHERTEXT | MAC |
-+------------------------------/------\------+-------...--------+
+ +------------ HEADER -------------+ +-------- ENCRYPTED --------+
+ / \ / \
++-------------------------------------+------------+----...-----------+------+
+| MAGIC | NICE | SENDER | EPUB | SIGN | SIZE | MAC | CIPHERTEXT | MAC | JUNK |
++------------------------------/------\------------+----...-----------+------+
/ \
- +--------------------------------------------+
- | MAGIC | NICE | RCPT | SENDER | EPUB | SIZE |
- +--------------------------------------------+
+ +-------------------------------------+
+ | MAGIC | NICE | RCPT | SENDER | EPUB |
+ +-------------------------------------+
@end verbatim
@multitable @columnfractions 0.2 0.3 0.5
@headitem @tab XDR type @tab Value
@item Magic number @tab
8-byte, fixed length opaque data @tab
- @code{NNCPE0x10x00x00}
+ @verb{|N N C P E 0x00 0x00 0x01|}
@item Niceness @tab
unsigned integer @tab
1-255, packet @ref{Niceness, niceness} level
@item Signature @tab
64-byte, fixed length opaque data @tab
ed25519 signature for that packet's header
-@item Size @tab
- unsigned hyper integer @tab
- Encrypted payload size
@end multitable
Signature is calculated over the following structure:
@item Recipient (32-byte recipient node's id)
@item Sender
@item Exchange public key
-@item Size
@end itemize
-Actual encrypted payload comes after that header. Payload is encrypted
-using @url{https://www.schneier.com/academic/twofish/, Twofish}
-algorithm with 256-bit key in
+All following encryption is done using
+@url{https://www.schneier.com/academic/twofish/, Twofish} algorithm with
+256-bit key in
@url{https://en.wikipedia.org/wiki/Counter_mode#Counter_.28CTR.29, CTR}
mode of operation with zero initialization vector (because each
-encrypted packet has ephemeral exchange key). Ciphertext's length is
-equal to plaintext. @url{https://blake2.net/, BLAKE2b-256} MAC is
-appended to the ciphertext.
+encrypted packet has ephemeral exchange key). @url{https://blake2.net/,
+BLAKE2b-256} MAC is appended to the ciphertext.
+
+After the headers comes an encrypted payload size and MAC of that size.
+
+@multitable @columnfractions 0.2 0.3 0.5
+@headitem @tab XDR type @tab Value
+@item Size @tab
+ unsigned hyper integer @tab
+ Payload size.
+@end multitable
+
+Next comes the actual encrypted payload with corresponding MAC.
Each node has static @strong{exchange} and @strong{signature} keypairs.
When node A wants to send an encrypted packet to node B, it:
@enumerate
@item generates ephemeral @url{http://cr.yp.to/ecdh.html, curve25519} keypair
-@item prepares structure for signing (underlying payload size must be
-already known)
-@item signs that structure using private @url{http://ed25519.cr.yp.to/,
-ed25519} signature key
+@item prepares structure for signing
+@item signs that structure using private
+ @url{http://ed25519.cr.yp.to/, ed25519} signature key
@item takes remote node's exchange public key and performs
-Diffie-Hellman computation on this remote static public key and private
-ephemeral one
-@item derived ephemeral key used as an input to
-@url{https://en.wikipedia.org/wiki/HKDF, HKDF}-BLAKE2b-256 key
-derivation function
-@item two 256-bit keys are derived from it for using with Twofish and
-BLAKE2b-MAC functions
-@item Twofish encryption is performed over the plaintext and
-BLAKE2b-MACing is performed over the ciphertext. Ciphertext and MAC tag
-go after header.
+ Diffie-Hellman computation on this remote static public key and
+ private ephemeral one
+@item derived ephemeral key is used as an input to
+ @url{https://en.wikipedia.org/wiki/HKDF, HKDF}-BLAKE2b-256 KDF
+@item derives four session keys using
+ @url{https://en.wikipedia.org/wiki/HKDF, HKDF}-BLAKE2b-256 KDF:
+ @enumerate
+ @item "Size" encryption (for Twofish) key
+ @item "Size" authentication (for BLAKE2b-MAC) key
+ @item Payload encryption key
+ @item Payload authentication key
+ @end enumerate
+@item encrypts size, appends its ciphertext to the header
+@item appends MAC tag over that ciphertext
+@item encrypts and appends payload ciphertext
+@item appends MAC tag over that payload ciphertext
+@item possibly appends any kind of "junk" noise data to hide real
+ payload's size from the adversary
@end enumerate
--- /dev/null
+@node Platform-specific
+@section Platform-specific instructions
+
+@node General
+@subsection General installation instructions
+
+@verbatim
+% wget http://www.nncpgo.org/download/nncp-0.1.tar.xz
+% wget http://www.nncpgo.org/download/nncp-0.1.tar.xz.sig
+% gpg --verify nncp-0.1.tar.xz.sig nncp-0.1.tar.xz
+% tar xf nncp-0.1.tar.xz
+% make -C nncp-0.1 all
+@end verbatim
+
+There is @command{install} target respecting @env{DESTDIR}. It will
+install binaries and info-documentation.
+
+@node FreeBSD
+@subsection FreeBSD installation instructions
+
+Look @ref{Integrity, here} for finding public keys for tarball authentication.
+
+@verbatim
+# pkg install go
+@end verbatim
+
+follow @ref{General, general} installation instructions
+
+@verbatim
+# make -C nncp-0.1 install
+@end verbatim
+
+@node Ubuntu
+@subsection Ubuntu installation instructions
+
+Look @ref{Integrity, here} for finding public keys for tarball authentication.
+
+@table @asis
+@item Ubuntu 16.04
+
+@verbatim
+# apt install golang
+@end verbatim
+
+follow @ref{General, general} installation instructions
+
+@verbatim
+# make -C nncp-0.1 install PREFIX=/usr
+@end verbatim
+
+@item Ubuntu 14.04
+
+@verbatim
+# apt-get install golang-1.6
+% wget http://www.nncpgo.org/download/nncp-0.1.tar.xz
+% wget http://www.nncpgo.org/download/nncp-0.1.tar.xz.sig
+% gpg --verify nncp-0.1.tar.xz.sig nncp-0.1.tar.xz
+% tar xf nncp-0.1.tar.xz
+% PATH=/usr/lib/go-1.6/bin:$PATH make -C nncp-0.1 all
+# make -C nncp-0.1 install PREFIX=/usr
+@end verbatim
+
+@end table
--- /dev/null
+@node Postfix
+@unnumbered Integration with Postfix
+
+This section is taken from @url{http://www.postfix.org/nncp_README.html,
+Postfix and UUCP} manual and just replaces UUCP-related calls with NNCP
+ones.
+
+@strong{Setting up a Postfix Internet to NNCP gateway}
+
+Here is how to set up a machine that sits on the Internet and that forwards
+mail to a LAN that is connected via NNCP.
+
+@itemize
+
+@item You need an @ref{nncp-mail} program that extracts the sender
+address from mail that arrives via NNCP, and that feeds the mail into
+the Postfix @command{sendmail} command.
+
+@item Define a @command{pipe(8)} based mail delivery transport for
+delivery via NNCP:
+@verbatim
+/usr/local/etc/postfix/master.cf:
+nncp unix - n n - - pipe
+ flags=F user=nncp argv=nncp-mail -quiet $nexthop $recipient
+@end verbatim
+
+This runs the @command{nncp-mail} command to place outgoing mail into
+the NNCP queue after replacing @var{$nexthop} by the receiving NNCP
+node and after replacing @var{$recipient} by the recipients. The
+@command{pipe(8)} delivery agent executes the @command{nncp-mail}
+command without assistance from the shell, so there are no problems with
+shell meta characters in command-line parameters.
+
+@item Specify that mail for @emph{example.com}, should be delivered via
+NNCP, to a host named @emph{nncp-host}:
+
+@verbatim
+/usr/local/etc/postfix/transport:
+ example.com nncp:nncp-host
+ .example.com nncp:nncp-host
+@end verbatim
+
+See the @command{transport(5)} manual page for more details.
+
+@item Execute the command @command{postmap /etc/postfix/transport}
+whenever you change the @file{transport} file.
+
+@item Enable @file{transport} table lookups:
+
+@verbatim
+/usr/local/etc/postfix/main.cf:
+ transport_maps = hash:$config_directory/transport
+@end verbatim
+
+@item Add @emph{example.com} to the list of domains that your site is
+willing to relay mail for.
+
+@verbatim
+/usr/local/etc/postfix/main.cf:
+ relay_domains = example.com ...other relay domains...
+@end verbatim
+
+See the @option{relay_domains} configuration parameter description for
+details.
+
+@item Execute the command @command{postfix reload} to make the changes
+effective.
+
+@end itemize
+
+@strong{Setting up a Postfix LAN to NNCP gateway}
+
+Here is how to relay mail from a LAN via NNCP to the Internet.
+
+@itemize
+
+@item You need an @ref{nncp-mail} program that extracts the sender
+address from mail that arrives via NNCP, and that feeds the mail into
+the Postfix @command{sendmail} command.
+
+@item Specify that all remote mail must be sent via the @command{nncp}
+mail transport to your NNCP gateway host, say, @emph{nncp-gateway}:
+
+@verbatim
+/usr/local/etc/postfix/main.cf:
+ relayhost = nncp-gateway
+ default_transport = nncp
+@end verbatim
+
+Postfix 2.0 and later also allows the following more succinct form:
+
+@verbatim
+/usr/local/etc/postfix/main.cf:
+ default_transport = nncp:nncp-gateway
+@end verbatim
+
+@item Define a @command{pipe(8)} based message delivery transport for
+mail delivery via NNCP:
+
+@verbatim
+/usr/local/etc/postfix/master.cf:
+nncp unix - n n - - pipe
+ flags=F user=nncp argv=nncp-mail -quiet $nexthop $recipient
+@end verbatim
+
+This runs the @command{nncp-mail} command to place outgoing mail into
+the NNCP queue. It substitutes the hostname (@emph{nncp-gateway}, or
+whatever you specified) and the recipients before executing the command.
+The @command{nncp-mail} command is executed without assistance from the
+shell, so there are no problems with shell meta characters.
+
+@item Execute the command @command{postfix reload} to make the changes
+effective.
+
+@end itemize
Development source code contains the latest version of the code. It may
be buggy. It does not contain compiled documentation and dependent
-libraries source code. Because of that, it is not recommended for
-porters to use @ref{Tarballs} instead.
+libraries source code. Because of that, it is recommended for porters
+to use @ref{Tarballs, tarballs} instead.
You can obtain it by cloning @url{http://git-scm.com/, Git}
@url{http://git.cypherpunks.ru/cgit.cgi/nncp.git/log/, repository}
@headitem @tab XDR type @tab Value
@item Magic number @tab
8-byte, fixed length opaque data @tab
- @code{NNCPS0x10x00x00}
+ @verb{|N N C P S 0x00 0x00 0x01|}
@item Payload @tab
variable length opaque data @tab
Noise packet itself
@end multitable
-Peers static keys are specified as @ref{Configuration, @code{noisepub}}
+Peers static keys are specified as @ref{Configuration, @emph{noisepub}}
configuration entry.
-Payload inside Noise packets has maximum size of @code{65 KiB - 256 B =
+Payload inside Noise packets has maximum size of @emph{64 KiB - 256 B =
65280 B}. It is sent immediately in the first message by each side. The
very first payload (that is carried inside handshake messages) is always
-padded to the maximum size with @code{HALT} packets (read below), for
-hiding actual number of @code{INFO} packets (number of files available
+padded to the maximum size with @emph{HALT} packets (read below), for
+hiding actual number of @emph{INFO} packets (number of files available
for transmission).
Each SP payload is a concatenation of SP packets. Each packet has
XDR-encoded header and then corresponding XDR-encoded body. Header is
just an unsigned integer telling what body structure follows.
-@table @code
+@table @emph
@item HALT
Stop file transmission, empty sending queue on the remote side.
- Actually @code{HALT} packet does not have any body, only the header
+ Actually @emph{HALT} packet does not have any body, only the header
with the type. It is also used in the first payload for padding to
the maximum size.
+@verbatim
++------+
+| HALT |
++------+
+@end verbatim
@item INFO
Information about the file we have for transmission.
@verbatim
-+--------------------+
-| NICE | SIZE | HASH |
-+--------------------+
++------+--------------------+
+| INFO | NICE | SIZE | HASH |
++------+--------------------+
@end verbatim
@multitable @columnfractions 0.2 0.3 0.5
@headitem @tab XDR type @tab Value
File transmission request. Ask remote side to queue the file for
transmission.
@verbatim
-+---------------+
-| HASH | OFFSET |
-+---------------+
++------+---------------+
+| FREQ | HASH | OFFSET |
++------+---------------+
@end verbatim
@multitable @columnfractions 0.2 0.3 0.5
@headitem @tab XDR type @tab Value
@item FILE
Chunk of file.
@verbatim
-+-------------------------+
-| HASH | OFFSET | PAYLOAD |
-+-------------------------+
++------+-------------------------+
+| FILE | HASH | OFFSET | PAYLOAD |
++------+-------------------------+
@end verbatim
@multitable @columnfractions 0.2 0.3 0.5
@headitem @tab XDR type @tab Value
@item DONE
Signal remote side that we have successfully downloaded the file.
@verbatim
-+------+
-| HASH |
-+------+
++------+------+
+| DONE | HASH |
++------+------+
@end verbatim
@multitable @columnfractions 0.2 0.3 0.5
@headitem @tab XDR type @tab Value
@end multitable
@end table
+
+Typical peers behaviour is following:
+
+@enumerate
+@item Perform Noise-IK handshake.
+@item When remote peer's identity is known (by definition for initiator
+and after receiving the first packet for responder (however it is not
+authenticated yet)), then collect all @emph{tx}-related files
+information and prepare payload packets with all that @emph{INFO}s.
+@item Pad the very first payload packet (that is sent with first Noise
+handshake message) with @emph{HALT}s to the maximal size.
+@item Send all queued payload packets.
+@item When @emph{INFO} packet received, check that it has an acceptable
+niceness level (skip if not), check if file's @file{.part} exists and
+queue @emph{FREQ} outgoing packet (with corresponding offset if
+required).
+@item When @emph{FREQ} packet received, append it to current sending
+queue. Sending queue contains files with offsets that are needed to be
+sent.
+@item While sending queue is not empty, send @emph{FILE} packets until
+queue's head is fully sent. @emph{FREQ} can contain offset equal to
+size -- anyway send @emph{FILE} packet with an empty payload.
+@item When @emph{FILE} packet received, check if it is not fully
+downloaded (comparing to @emph{INFO}'s packet information). If so, then
+run background integrity checker on it. If the check succeeds, then
+delete @file{.part} suffix from file's name and send @emph{DONE} packet.
+@item When @emph{DONE} packet received, delete corresponding file.
+@item When @emph{HALT} packet received, empty file sending queue.
+@item @emph{FILE} sending is performed only if no other outgoing packets
+are queued.
+@item Each second the node checks whether any new @emph{tx} packets have
+appeared and queues corresponding @emph{INFO} packets.
+@item If no packets are sent and received during @ref{CfgOnlineDeadline,
+onlinedeadline} duration, then close the connection. There is no
+explicit indication that session is over.
+@end enumerate
spool/BYRR...CG6Q/tx/ZI5U...5RRQ
@end verbatim
-Except for @code{tmp}, all other directories are Base32-encoded node
-identifiers (@code{2WHB...OABQ}, @code{BYRR...CG6Q} in our example).
-Each node subdirectory has @code{rx} (received, partly received and
-currently unprocessed packets) and @code{tx} (for outbound packets)
+Except for @file{tmp}, all other directories are Base32-encoded node
+identifiers (@file{2WHB...OABQ}, @file{BYRR...CG6Q} in our example).
+Each node subdirectory has @file{rx} (received, partly received and
+currently unprocessed packets) and @file{tx} (for outbound packets)
directories.
-Each @code{rx}/@code{tx} directory contains one file per encrypted
+Each @file{rx}/@file{tx} directory contains one file per encrypted
packet. Its filename is Base32 encoded BLAKE2b hash of the contents. So
-it can be integrity checked at any time. @code{5ZIB...UMKW.part} is
-partly received file from @code{2WHB...OABQ} node. @code{tx} directory
+it can be integrity checked at any time. @file{5ZIB...UMKW.part} is
+partly received file from @file{2WHB...OABQ} node. @file{tx} directory
can not contain partly written files -- they are moved atomically from
-@code{tmp}.
+@file{tmp}.
-Only one process can work with @code{rx}/@code{tx} directories at once,
+Only one process can work with @file{rx}/@file{tx} directories at once,
so there are corresponding lock files.
--- /dev/null
+@node Thanks
+@unnumbered Thanks
+
+There are people deserving to be thanked for helping this project:
+
+@itemize
+@item Shawn K. Quinn for his descriptive instructions about building
+NNCP under Ubuntu GNU/Linux distributions and bug reports.
+@end itemize
Another possibility is to use POP3/IMAP4 servers, but this is too
overcomplicated and bloated for the simple task. Not an option. KISS!
-@anchor{Postfix}
-
Just tell both of your Postfixes (on the server and notebook) to drop
-email as a mail via NNCP to specified node. This is done similarly as
-with UUCP and as written in Postfix
-@url{http://www.postfix.org/UUCP_README.html, documentation}.
-
-Search for @code{uucp} related strings in @code{master.cf} and replace
-command to NNCP ones:
-
-@verbatim
-nncp unix - n n - - pipe flags=Fqhu user=nncp argv=nncp-mail -quiet $nexthop $recipient
-@end verbatim
-
-then add transport map, telling that mail for example.com domain can be
-reached through NNCP transport to node @code{bob}:
-
-@verbatim
-example.com nncp:bob
-@end verbatim
+email as a mail via NNCP (@ref{nncp-mail}) to specified node. This is
+done similarly as with UUCP and as written in
+@url{http://www.postfix.org/UUCP_README.html, Postfix documentation}.
-Now, all mail will be stored in NNCP @ref{Spool, spool}, that after
-exchanging and tossing will call local @code{sendmail} command to
-deliver them just that was happened on the same machine.
+Look @ref{Postfix, here} for further information. All mail will be
+stored in NNCP @ref{Spool, spool}, that after exchanging and tossing
+will call local @command{sendmail} command to deliver them just like
+that happened on the same machine.
@node UsecaseUnreliable
@section Unreliable/expensive communication link
Assume that you have got slow modem/radio/cellular link that frequently
disconnects and causes TCP timeouts. Not all HTTP servers support file
download continuation. SMTP does not support resuming at all and heavy
-messages is a problem to retrieve. Moreover, each disconnect leads to
-the same data retransmission again, that can be expensive to afford.
+messages are problematic to retrieve. Moreover, each disconnect leads to
+the same data retransmission again, which sometimes can not be afforded.
-Just send your mail and files through NNCP. You can use either offline
-delivery methods -- read about them in the next section, or you can use
-included NNCP TCP daemon.
+Just send your @ref{nncp-mail, mail} and @ref{nncp-file, files} through
+NNCP. You can use either offline delivery methods -- read about them in
+the next section, or you can use included NNCP @ref{nncp-daemon, TCP
+daemon}.
-The command below:
+The command:
@verbatim
% nncp-file file_i_want_to_send bob:
% nncp-file another_file bob:movie.avi
@end verbatim
-will queue two files for sending to @code{bob} node. Fire and forget!
+will queue two files for sending to @emph{bob} node. Fire and forget!
Now this is daemon's job (or offline transfer) to send this file part by
part to remote system when it is available.
choice. Just send files as shown in previous section, but use removable
media for transferring packets to other nodes.
-Assume that you send two files to @code{bob} node. Insert USB storage
-device, mount it and run:
+Assume that you send two files to @emph{bob} node. Insert USB storage
+device, mount it and run @ref{nncp-xfer}:
@verbatim
% nncp-xfer -node bob /media/usbstick
@end verbatim
-to copy all outbound packets related to @code{bob}'s node. Use
-@code{-force} option to forcefully create related directory on USB
+to copy all outbound packets related to @emph{bob}'s node. Use
+@option{-force} option to forcefully create related directory on USB
storage if they are missing (for example when running for the first
time).
-If you use single storage device to transfer data both to @code{bob} and
-@code{alice}, then just omit @code{-node} option to copy all existing
+If you use single storage device to transfer data both to @emph{bob} and
+@emph{alice}, then just omit @option{-node} option to copy all existing
outgoing packets to that storage device.
@verbatim
@end verbatim
to find all packets related to their node and copy them locally for
-further processing. @code{nncp-xfer} is the only command used with
-removable devices.
+further processing. @command{nncp-xfer} is the only command used with
+removable devices.
@node UsecaseF2F
@section Private, isolated MitM-resistant networks
and forged nodes. However they are harder to support and require more
time to be done right.
-NNCP's TCP daemon uses @url{http://noiseprotocol.org/, Noise-IK}
-protocol to mutually authenticate peers and provide effective (both
-participants send payload in the very first packet) secure transport
-with forward secrecy property.
+NNCP's @ref{nncp-daemon, TCP daemon} uses
+@url{http://noiseprotocol.org/, Noise-IK} protocol to mutually
+authenticate peers and provide effective (both participants send payload
+in the very first packet) secure transport with forward secrecy
+property.
@verbatim
% nncp-daemon -bind [::]:5400
@verbatim
% nncp-call bob
@end verbatim
-will try to connect to @code{bob}'s node known TCP addresses (taken from
+will try to connect to @emph{bob}'s node known TCP addresses (taken from
configuration file) and send all related outbound packets and retrieve
those the Bob has. All interrupted transfers will be automatically
resumed.
via: [bob]
@end verbatim
-That configuration file tells that we have got two known neighbours:
-@code{bob} and @code{bob-airgap}. @code{bob} can be reached via online
-connection using @code{lan} address. @code{bob-airgap} can be reached by
-sending intermediate relay packet through the @code{bob}.
+That @ref{Configuration, configuration file} tells that we have got two
+known neighbours: @emph{bob} and @emph{bob-airgap}. @emph{bob} can be
+reached via online connection using @emph{lan} address.
+@emph{bob-airgap} can be reached by sending intermediate relay packet
+through the @emph{bob}.
-Any command like @code{nncp-file myfile bob-airgap:} will automatically
-create an encapsulated packet: one for the destination endpoint, and
-other carrying it for intermediate relaying node.
+Any command like @command{nncp-file myfile bob-airgap:} will
+automatically create an encapsulated packet: one for the destination
+endpoint, and other carrying it for intermediate relaying node.
Pay attention that relaying node knows nothing about the packet inside,
but just its size and priority. Transition packets are encrypted too.
-@code{bob} can not read @code{bob-airgap}'s packets.
+@emph{bob} can not read @emph{bob-airgap}'s packets.
@node UsecaseCensor
@section Network censorship bypassing
entertainment content delivering and popular social networks access
(that are already bloated with advertisements, locally executed
proprietary JavaScript code (for spying on user activities, collect data
-on them), shamelessly exploiting of very basic interhuman need of
-communication).
+on them), shamelessly exploiting the very basic human need of communication).
This is their natural wish. But nobody forces you to obey huge
corporations like Apple, Google or Microsoft. It is your choice to
-create isolated friend-to-friend network with piles of harmless content
-and private messaging. Only predators silently watch for their victims
-in mammals world -- it harms your health being watched and feeling that
-you are the victim that has already done something wrong.
+create an isolated friend-to-friend network with piles of harmless
+content and private messaging. Only predators silently watch for their
+victims in mammals world -- it harms your health being watched and
+feeling that you are the victim that has already done something wrong.
@node UsecaseSpy
@section Reconnaissance, spying, intelligence, covert agents
be pretty fast, allowing to quickly fire chunks of queued packets.
Very important property is that compromising of those dead drops and
-storages must not be fatal and even dangerous. Packets sent through the
-network and exchanged via those devices are end-to-end @ref{Encrypted,
-encrypted} (but unfortunately lacking forward secrecy). No filenames,
-mail recipients are seen.
+storages must be neither fatal nor even dangerous. Packets sent through
+the network and exchanged via those devices are end-to-end
+@ref{Encrypted, encrypted} (but unfortunately lacking forward secrecy).
+No filenames, mail recipients are seen.
All communications are done with so-called @ref{Spool, spool} area:
directory containing only those unprocessed encrypted packets. After
packet transfer you still can not read any of them: you have to run
-another stage: tossing, that involves your private cryptographic keys.
-So even if your loose your computer, storage devices and so on -- it is
-not so bad, because you are not carrying private keys with it, you do
-not "toss" those packets immediately on the same device. Tossing
-(reading those encrypted packets and extracting transferred files and
-mail messages) could and should be done on a separate computer.
+another stage: @ref{nncp-toss, tossing}, that involves your private
+cryptographic keys. So even if you lose your computer, storage devices
+and so on -- it is not so bad, because you are not carrying private keys
+with it (are you?), you do not "toss" those packets immediately on the
+same device. Tossing (reading those encrypted packets and extracting
+transferred files and mail messages) could and should be done on a
+separate computer.
@unnumbered Workflow
NNCP consists of several utilities. As a rule you will have the
-following workflow with them.
+following workflow:
@enumerate
@item Run @ref{nncp-newnode} on each node to create an initial
required configuration about their reachability, permissions of file or
freq transmission.
@item Use @ref{nncp-file}, @ref{nncp-freq}, @ref{nncp-mail}
-(@ref{Postfix, look how} Postfix SMTP server could be configured for its
-usage) commands to queue file, freq and mail transmissions. Repeat as
+(@ref{Postfix, look how} Postfix SMTP server could be configured)
+commands to queue file, freq and mail transmissions. Repeat as
many times any time as you wish.
@item Depending on connection methods, either:
@itemize
@item run @ref{nncp-daemon} to accept remotely initiated connections
to your node
- @item run @ref{nncp-call} to initiate connection to required nodes
+ @item run either @ref{nncp-call} or @ref{nncp-caller} to initiate
+ connection to required nodes from time to time
@item use @ref{nncp-xfer} with removable storage devices for copying
- packets for/from other nodes
+ packets to/from other nodes
@end itemize
@item After successful packet exchanging (or just simply from time to
time), run @ref{nncp-toss} for tossing (decrypting and processing) all
find src -name .travis.yml -delete
rm -fr src/github.com/davecgh/go-xdr/xdr
+rm -fr src/github.com/gorhill/cronexpr/cronexpr src/github.com/gorhill/cronexpr/APLv2
+rm -fr ports
+rm makedist.sh
cat > doc/download.texi <<EOF
@node Tarballs
cat <<EOF
Subject: NNCP $release release announcement
-I am pleased to announce nncp $release release availability!
+I am pleased to announce NNCP $release release availability!
NNCP (Node to Node copy) is a collection of utilities simplifying
secure store-and-forward files and mail exchanging.
--- /dev/null
+# $FreeBSD$
+
+PORTNAME= nncp
+PORTVERSION= 0.2
+CATEGORIES= net
+MASTER_SITES= http://www.nncpgo.org/download/ \
+ http://sourceforge.net/projects/nncp/files/
+
+MAINTAINER= stargrave@stargrave.org
+COMMENT= Utilities for secure store-and-forward files and mail exchanging
+
+LICENSE= GPLv3+
+LICENSE_FILE= ${WRKSRC}/COPYING
+
+# Go is needed only at build time.
+BUILD_DEPENDS= go:lang/go
+
+USES= tar:xz
+# Build with upstream's BSD make compatible makefile, not the default one.
+MAKEFILE= BSDmakefile
+
+# rc(8) startup scripts for the long-running utilities.
+USE_RC_SUBR= nncp-caller nncp-daemon nncp-toss
+# NOTE(review): REQUIRE is not a standard ports framework variable and the
+# rc scripts below hardcode their own "# REQUIRE:" lines -- confirm this
+# assignment is actually consumed (e.g. via SUB_LIST) or drop it.
+REQUIRE= DAEMON
+
+# Templates processed with %%PREFIX%% substitution.
+SUB_FILES= pkg-message pkg-deinstall nncp.newsyslog.conf
+
+PORTDOCS= AUTHORS NEWS README THANKS
+INFO= nncp
+INSTALL_TARGET= install-strip
+
+PLIST_FILES= bin/nncp-call \
+ bin/nncp-caller \
+ bin/nncp-check \
+ bin/nncp-daemon \
+ bin/nncp-file \
+ bin/nncp-freq \
+ bin/nncp-log \
+ bin/nncp-mail \
+ bin/nncp-newnode \
+ bin/nncp-pkt \
+ bin/nncp-stat \
+ bin/nncp-toss \
+ bin/nncp-xfer \
+ etc/newsyslog.conf.d/nncp.conf
+PLIST_DIRS= /var/spool/nncp
+
+# Install the newsyslog(8) rotation config and create the spool directory.
+post-install:
+ ${MKDIR} ${STAGEDIR}${PREFIX}/etc/newsyslog.conf.d
+ ${INSTALL_DATA} ${WRKDIR}/nncp.newsyslog.conf ${STAGEDIR}${PREFIX}/etc/newsyslog.conf.d/nncp.conf
+ ${MKDIR} ${STAGEDIR}/var/spool/nncp
+
+.include <bsd.port.mk>
--- /dev/null
+#!/bin/sh
+#
+# $FreeBSD$
+#
+# PROVIDE: nncp-caller
+# REQUIRE: DAEMON NETWORKING FILESYSTEMS
+# KEYWORD: shutdown
+#
+# Add these lines to /etc/rc.conf.local or /etc/rc.conf
+# to enable this service:
+#
+# nncp_caller_enable (bool): Set to NO by default.
+# Set it to YES to enable nncp-caller.
+# nncp_caller_config (path): Set to %%PREFIX%%/etc/nncp.yaml by default.
+
+. /etc/rc.subr
+
+name=nncp_caller
+rcvar=nncp_caller_enable
+
+load_rc_config $name
+
+# Defaults; override from rc.conf(5).
+: ${nncp_caller_enable:="NO"}
+: ${nncp_caller_config="%%PREFIX%%/etc/nncp.yaml"}
+
+command=%%PREFIX%%/bin/nncp-caller
+# NOTE(review): the trailing "&" backgrounds the command -- presumably
+# nncp-caller does not daemonize itself; confirm.
+command_args="-quiet -cfg $nncp_caller_config &"
+
+run_rc_command "$1"
--- /dev/null
+#!/bin/sh
+#
+# $FreeBSD$
+#
+# PROVIDE: nncp-daemon
+# REQUIRE: DAEMON NETWORKING FILESYSTEMS
+# KEYWORD: shutdown
+#
+# Add these lines to /etc/rc.conf.local or /etc/rc.conf
+# to enable this service:
+#
+# nncp_daemon_enable (bool): Set to NO by default.
+# Set it to YES to enable nncp-daemon.
+# nncp_daemon_config (path): Set to %%PREFIX%%/etc/nncp.yaml by default.
+# nncp_daemon_bind (string): Address:port to bind to
+# Default is "[::]:5400".
+
+. /etc/rc.subr
+
+name=nncp_daemon
+rcvar=nncp_daemon_enable
+
+load_rc_config $name
+
+# Defaults; override from rc.conf(5).
+: ${nncp_daemon_enable:="NO"}
+: ${nncp_daemon_config="%%PREFIX%%/etc/nncp.yaml"}
+: ${nncp_daemon_bind="[::]:5400"}
+
+command=%%PREFIX%%/bin/nncp-daemon
+# NOTE(review): the trailing "&" backgrounds the command -- presumably
+# nncp-daemon does not daemonize itself; confirm.
+command_args="-quiet -bind $nncp_daemon_bind -cfg $nncp_daemon_config &"
+
+run_rc_command "$1"
--- /dev/null
+#!/bin/sh
+#
+# $FreeBSD$
+#
+# PROVIDE: nncp-toss
+# REQUIRE: DAEMON NETWORKING FILESYSTEMS
+# KEYWORD: shutdown
+#
+# Add these lines to /etc/rc.conf.local or /etc/rc.conf
+# to enable this service:
+#
+# nncp_toss_enable (bool): Set to NO by default.
+# Set it to YES to enable nncp-toss.
+# nncp_toss_config (path): Set to %%PREFIX%%/etc/nncp.yaml by default.
+# nncp_toss_cycle (int): Repeat tossing after that number of seconds.
+# Default is "60".
+
+. /etc/rc.subr
+
+name=nncp_toss
+rcvar=nncp_toss_enable
+
+load_rc_config $name
+
+# Defaults; override from rc.conf(5).
+: ${nncp_toss_enable:="NO"}
+: ${nncp_toss_config="%%PREFIX%%/etc/nncp.yaml"}
+: ${nncp_toss_cycle="60"}
+
+command=%%PREFIX%%/bin/nncp-toss
+# NOTE(review): the trailing "&" backgrounds the command -- presumably
+# nncp-toss does not daemonize itself; confirm.
+command_args="-quiet -cycle $nncp_toss_cycle -cfg $nncp_toss_config &"
+
+run_rc_command "$1"
--- /dev/null
+/var/spool/nncp/log 644 7 100 * CXN
--- /dev/null
+#!/bin/sh
+# Deinstallation hook: warn the administrator about data intentionally
+# left behind (configuration with private keys, and the packet spool).
+
+# Act only during the POST-DEINSTALL phase of pkg(8).
+if [ "$2" != "POST-DEINSTALL" ]; then
+ exit 0
+fi
+
+if [ -e %%PREFIX%%/etc/nncp.yaml ]; then
+ echo "%%PREFIX%%/etc/nncp.yaml with your private keys is not removed!"
+fi
+
+if [ -e /var/spool/nncp ]; then
+ echo "/var/spool/nncp is not removed!"
+fi
--- /dev/null
+======================================================================
+- Add the following lines to /etc/rc.conf to enable nncp-daemon,
+ nncp-caller and nncp-toss:
+
+ nncp_daemon_enable="YES"
+ nncp_caller_enable="YES"
+ nncp_toss_enable="YES"
+
+- %%PREFIX%%/etc/newsyslog.conf.d/nncp.conf
+ log rotation configuration has been installed.
+- /var/spool/nncp is the packet spool directory.
+- Look in nncp.info for how to integrate mail exchanging with Postfix.
+- Generate NNCP configuration file using the command:
+
+ # umask 077
+ # nncp-newnode > %%PREFIX%%/etc/nncp.yaml
+======================================================================
--- /dev/null
+NNCP (Node to Node copy) is a collection of utilities simplifying
+secure store-and-forward files and mail exchanging.
+
+These utilities are intended to help build up small size (dozens of
+nodes) ad-hoc friend-to-friend (F2F) statically routed darknet networks
+for fire-and-forget secure reliable files, file requests and Internet
+mail transmission. All packets are integrity checked, end-to-end
+encrypted (E2EE), explicitly authenticated by known participants public
+keys. Onion encryption is applied to relayed packets. Each node acts
+both as a client and server, can use push and poll behaviour model.
+
+Out-of-box offline sneakernet/floppynet, dead drops and air-gapped
+computers support. But online TCP daemon with full-duplex resumable data
+transmission exists.
+
+WWW: http://www.nncpgo.org/
--- /dev/null
+/*
+NNCP -- Node to Node copy, utilities for store-and-forward data exchange
+Copyright (C) 2016-2017 Sergey Matveev <stargrave@stargrave.org>
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+package nncp
+
+import (
+ "net"
+ "strconv"
+)
+
+// CallNode tries to establish a session with the given node, dialing
+// each of the supplied TCP addresses in turn until one of them yields a
+// successfully started session. nice, xxOnly, onlineDeadline and
+// maxOnlineTime are passed through to StartI unchanged. isGood reports
+// whether at least one session was established.
+func (ctx *Ctx) CallNode(node *Node, addrs []string, nice uint8, xxOnly *TRxTx, onlineDeadline, maxOnlineTime uint) (isGood bool) {
+ for _, addr := range addrs {
+ sds := SDS{"node": node.Id, "addr": addr}
+ ctx.LogD("call", sds, "dialing")
+ conn, err := net.Dial("tcp", addr)
+ if err != nil {
+ ctx.LogD("call", SdsAdd(sds, SDS{"err": err}), "dialing")
+ // Unreachable address: try the next one.
+ continue
+ }
+ ctx.LogD("call", sds, "connected")
+ state, err := ctx.StartI(
+ conn,
+ node.Id,
+ nice,
+ xxOnly,
+ onlineDeadline,
+ maxOnlineTime,
+ )
+ if err == nil {
+ ctx.LogI("call-start", sds, "connected")
+ // Block until the session finishes, then log its statistics.
+ state.Wait()
+ ctx.LogI("call-finish", SDS{
+ "node": state.Node.Id,
+ "duration": strconv.FormatInt(int64(state.Duration.Seconds()), 10),
+ "rxbytes": strconv.FormatInt(state.RxBytes, 10),
+ "txbytes": strconv.FormatInt(state.TxBytes, 10),
+ "rxspeed": strconv.FormatInt(state.RxSpeed, 10),
+ "txspeed": strconv.FormatInt(state.TxSpeed, 10),
+ }, "")
+ isGood = true
+ conn.Close()
+ // Session succeeded: do not try the remaining addresses.
+ break
+ } else {
+ // Session start failed: close and try the next address.
+ ctx.LogE("call-start", SdsAdd(sds, SDS{"err": err}), "")
+ conn.Close()
+ }
+ }
+ return
+}
import (
"errors"
+ "os"
"path"
+ "github.com/gorhill/cronexpr"
"golang.org/x/crypto/ed25519"
"gopkg.in/yaml.v2"
)
+const (
+ CfgPathEnv = "NNCPCFG"
+)
+
var (
DefaultCfgPath string = "/usr/local/etc/nncp.yaml"
DefaultSendmailPath string = "/usr/sbin/sendmail"
+ DefaultSpoolPath string = "/var/spool/nncp"
+ DefaultLogPath string = "/var/spool/nncp/log"
)
type NodeYAML struct {
Id string
ExchPub string
SignPub string
- NoisePub string
+ NoisePub *string `noisepub,omitempty`
Sendmail []string
- Incoming *string `incoming,omitempty`
- Freq *string `freq,omitempty`
- Via []string `via,omitempty`
+ Incoming *string `incoming,omitempty`
+ Freq *string `freq,omitempty`
+ Via []string `via,omitempty`
+ Calls []CallYAML `calls,omitempty`
Addrs map[string]string `addrs,omitempty`
+
+ OnlineDeadline *uint `onlinedeadline,omitempty`
+ MaxOnlineTime *uint `maxonlinetime,omitempty`
+}
+
+// CallYAML is the YAML (configuration file) representation of one
+// scheduled call towards a remote node.
+type CallYAML struct {
+	// Cron is the mandatory cron expression deciding when to call
+	Cron string
+	// Nice optionally limits the call to packets of at most this niceness
+	Nice *int `nice,omitempty`
+	// Xx optionally restricts the call to "rx" or "tx" direction only
+	Xx *string `xx,omitempty`
+	// Addr is an optional address alias (looked up in addrs) or literal address
+	Addr *string `addr,omitempty`
+	// OnlineDeadline optionally overrides the node-wide onlinedeadline (seconds)
+	OnlineDeadline *uint `onlinedeadline,omitempty`
+	// MaxOnlineTime optionally overrides the node-wide maxonlinetime (seconds)
+	MaxOnlineTime *uint `maxonlinetime,omitempty`
}
type NodeOurYAML struct {
return nil, errors.New("Invalid signPub size")
}
- noisePub, err := FromBase32(yml.NoisePub)
- if err != nil {
- return nil, err
- }
- if len(noisePub) != 32 {
- return nil, errors.New("Invalid noisePub size")
+ var noisePub []byte
+ if yml.NoisePub != nil {
+ noisePub, err = FromBase32(*yml.NoisePub)
+ if err != nil {
+ return nil, err
+ }
+ if len(noisePub) != 32 {
+ return nil, errors.New("Invalid noisePub size")
+ }
}
var incoming *string
freq = &fr
}
+ defOnlineDeadline := uint(DefaultDeadline)
+ if yml.OnlineDeadline != nil {
+ if *yml.OnlineDeadline <= 0 {
+ return nil, errors.New("OnlineDeadline must be at least 1 second")
+ }
+ defOnlineDeadline = *yml.OnlineDeadline
+ }
+ var defMaxOnlineTime uint
+ if yml.MaxOnlineTime != nil {
+ defMaxOnlineTime = *yml.MaxOnlineTime
+ }
+
+ var calls []*Call
+ for _, callYml := range yml.Calls {
+ expr, err := cronexpr.Parse(callYml.Cron)
+ if err != nil {
+ return nil, err
+ }
+ nice := uint8(255)
+ if callYml.Nice != nil {
+ if *callYml.Nice < 1 || *callYml.Nice > 255 {
+ return nil, errors.New("Nice must be between 1 and 255")
+ }
+ nice = uint8(*callYml.Nice)
+ }
+ var xx TRxTx
+ if callYml.Xx != nil {
+ switch *callYml.Xx {
+ case "rx":
+ xx = TRx
+ case "tx":
+ xx = TTx
+ default:
+ return nil, errors.New("xx field must be either \"rx\" or \"tx\"")
+ }
+ }
+ var addr *string
+ if callYml.Addr != nil {
+ if a, exists := yml.Addrs[*callYml.Addr]; exists {
+ addr = &a
+ } else {
+ addr = callYml.Addr
+ }
+ }
+ onlineDeadline := defOnlineDeadline
+ if callYml.OnlineDeadline != nil {
+ if *callYml.OnlineDeadline == 0 {
+ return nil, errors.New("OnlineDeadline must be at least 1 second")
+ }
+ onlineDeadline = *callYml.OnlineDeadline
+ }
+ var maxOnlineTime uint
+ if callYml.MaxOnlineTime != nil {
+ maxOnlineTime = *callYml.MaxOnlineTime
+ }
+ calls = append(calls, &Call{
+ Cron: expr,
+ Nice: nice,
+ Xx: &xx,
+ Addr: addr,
+ OnlineDeadline: onlineDeadline,
+ MaxOnlineTime: maxOnlineTime,
+ })
+ }
+
node := Node{
- Name: name,
- Id: nodeId,
- ExchPub: new([32]byte),
- SignPub: ed25519.PublicKey(signPub),
- NoisePub: new([32]byte),
- Sendmail: yml.Sendmail,
- Incoming: incoming,
- Freq: freq,
- Addrs: yml.Addrs,
+ Name: name,
+ Id: nodeId,
+ ExchPub: new([32]byte),
+ SignPub: ed25519.PublicKey(signPub),
+ Sendmail: yml.Sendmail,
+ Incoming: incoming,
+ Freq: freq,
+ Calls: calls,
+ Addrs: yml.Addrs,
+ OnlineDeadline: defOnlineDeadline,
+ MaxOnlineTime: defMaxOnlineTime,
}
copy(node.ExchPub[:], exchPub)
- copy(node.NoisePub[:], noisePub)
+ if len(noisePub) > 0 {
+ node.NoisePub = new([32]byte)
+ copy(node.NoisePub[:], noisePub)
+ }
return &node, nil
}
}
return &ctx, nil
}
+
+// CfgPathFromEnv returns the configuration file path to use: the value of
+// the NNCPCFG environment variable when it is set, otherwise the path
+// supplied on the command line through cmdlineFlag.
+func CfgPathFromEnv(cmdlineFlag *string) (p string) {
+	p = os.Getenv(CfgPathEnv)
+	if p == "" {
+		p = *cmdlineFlag
+	}
+	return
+}
"fmt"
"io/ioutil"
"log"
- "net"
"os"
- "strconv"
"strings"
"cypherpunks.ru/nncp"
debug = flag.Bool("debug", false, "Print debug messages")
version = flag.Bool("version", false, "Print version information")
warranty = flag.Bool("warranty", false, "Print warranty information")
+
+ onlineDeadline = flag.Uint("onlinedeadline", 0, "Override onlinedeadline option")
+ maxOnlineTime = flag.Uint("maxonlinetime", 0, "Override maxonlinetime option")
)
flag.Usage = usage
flag.Parse()
log.Fatalln("-rx and -tx can not be set simultaneously")
}
- cfgRaw, err := ioutil.ReadFile(*cfgPath)
+ cfgRaw, err := ioutil.ReadFile(nncp.CfgPathFromEnv(cfgPath))
if err != nil {
log.Fatalln("Can not read config:", err)
}
if err != nil {
log.Fatalln("Invalid NODE specified:", err)
}
+ if node.NoisePub == nil {
+ log.Fatalln("Node does not have online communication capability")
+ }
+
+ if *onlineDeadline == 0 {
+ onlineDeadline = &node.OnlineDeadline
+ }
+ if *maxOnlineTime == 0 {
+ maxOnlineTime = &node.MaxOnlineTime
+ }
var xxOnly nncp.TRxTx
if *rxOnly {
}
}
- isGood := false
- for _, addr := range addrs {
- ctx.LogD("call", nncp.SDS{"addr": addr}, "dialing")
- conn, err := net.Dial("tcp", addr)
- if err != nil {
- log.Println("Can not connect:", err)
- continue
- }
- ctx.LogD("call", nncp.SDS{"addr": addr}, "connected")
- state, err := ctx.StartI(conn, node.Id, nice, &xxOnly)
- if err == nil {
- ctx.LogI("call-start", nncp.SDS{"node": state.NodeId}, "connected")
- state.Wait()
- ctx.LogI("call-finish", nncp.SDS{
- "node": state.NodeId,
- "duration": strconv.FormatInt(int64(state.Duration.Seconds()), 10),
- "rxbytes": strconv.FormatInt(state.RxBytes, 10),
- "txbytes": strconv.FormatInt(state.TxBytes, 10),
- "rxspeed": strconv.FormatInt(state.RxSpeed, 10),
- "txspeed": strconv.FormatInt(state.TxSpeed, 10),
- }, "")
- isGood = true
- conn.Close()
- break
- } else {
- ctx.LogE("call-start", nncp.SDS{"node": state.NodeId, "err": err}, "")
- conn.Close()
- }
- }
- if !isGood {
+ if !ctx.CallNode(node, addrs, nice, &xxOnly, *onlineDeadline, *maxOnlineTime) {
os.Exit(1)
}
}
--- /dev/null
+/*
+NNCP -- Node to Node copy, utilities for store-and-forward data exchange
+Copyright (C) 2016-2017 Sergey Matveev <stargrave@stargrave.org>
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+// Croned NNCP TCP daemon caller
+package main
+
+import (
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "strconv"
+ "sync"
+ "time"
+
+ "cypherpunks.ru/nncp"
+)
+
+// usage prints the nncp-caller help banner and flag defaults to stderr.
+func usage() {
+	fmt.Fprintf(os.Stderr, nncp.UsageHeader())
+	fmt.Fprintln(os.Stderr, "nncp-caller -- croned NNCP TCP daemon caller\n")
+	fmt.Fprintf(os.Stderr, "Usage: %s [options] [NODE ...]\n", os.Args[0])
+	fmt.Fprintln(os.Stderr, "Options:")
+	flag.PrintDefaults()
+}
+
+// main parses flags and configuration, selects the nodes that have "calls"
+// configured (all neighbours by default, or only the NODEs named on the
+// command line), then spawns one goroutine per configured call. Each
+// goroutine sleeps until its cron expression next fires and dials the node.
+func main() {
+	var (
+		cfgPath = flag.String("cfg", nncp.DefaultCfgPath, "Path to configuration file")
+		quiet = flag.Bool("quiet", false, "Print only errors")
+		debug = flag.Bool("debug", false, "Print debug messages")
+		version = flag.Bool("version", false, "Print version information")
+		warranty = flag.Bool("warranty", false, "Print warranty information")
+	)
+	flag.Usage = usage
+	flag.Parse()
+	if *warranty {
+		fmt.Println(nncp.Warranty)
+		return
+	}
+	if *version {
+		fmt.Println(nncp.VersionGet())
+		return
+	}
+
+	// NNCPCFG environment variable takes precedence over the -cfg option
+	cfgRaw, err := ioutil.ReadFile(nncp.CfgPathFromEnv(cfgPath))
+	if err != nil {
+		log.Fatalln("Can not read config:", err)
+	}
+	ctx, err := nncp.CfgParse(cfgRaw)
+	if err != nil {
+		log.Fatalln("Can not parse config:", err)
+	}
+	ctx.Quiet = *quiet
+	ctx.Debug = *debug
+
+	// Collect the nodes to serve; nodes without any configured call are skipped
+	var nodes []*nncp.Node
+	if flag.NArg() > 0 {
+		for _, nodeId := range flag.Args() {
+			node, err := ctx.FindNode(nodeId)
+			if err != nil {
+				log.Fatalln("Invalid NODE specified:", err)
+			}
+			if len(node.Calls) == 0 {
+				ctx.LogD("caller", nncp.SDS{"node": node.Id}, "has no calls, skipping")
+				continue
+			}
+			nodes = append(nodes, node)
+		}
+	} else {
+		for _, node := range ctx.Neigh {
+			if len(node.Calls) == 0 {
+				ctx.LogD("caller", nncp.SDS{"node": node.Id}, "has no calls, skipping")
+				continue
+			}
+			nodes = append(nodes, node)
+		}
+	}
+
+	var wg sync.WaitGroup
+	for _, node := range nodes {
+		for i, call := range node.Calls {
+			wg.Add(1)
+			// node, i and call are passed as arguments to pin the current
+			// loop values inside the goroutine
+			go func(node *nncp.Node, i int, call *nncp.Call) {
+				defer wg.Done()
+				// Use the call's explicit address when given, otherwise
+				// try every known address of the node
+				var addrs []string
+				if call.Addr == nil {
+					for _, addr := range node.Addrs {
+						addrs = append(addrs, addr)
+					}
+				} else {
+					addrs = append(addrs, *call.Addr)
+				}
+				sds := nncp.SDS{"node": node.Id, "callindex": strconv.Itoa(i)}
+				for {
+					n := time.Now()
+					t := call.Cron.Next(n)
+					ctx.LogD("caller", sds, t.String())
+					if t.IsZero() {
+						// cronexpr returns zero time when there is no
+						// next occurrence: nothing more to schedule
+						ctx.LogE("caller", sds, "got zero time")
+						return
+					}
+					time.Sleep(t.Sub(n))
+					// The Busy flag (guarded by the node's mutex) ensures
+					// at most one simultaneous session per node
+					node.Lock()
+					if node.Busy {
+						node.Unlock()
+						ctx.LogD("caller", sds, "busy")
+						continue
+					} else {
+						node.Busy = true
+						node.Unlock()
+						ctx.CallNode(
+							node,
+							addrs,
+							call.Nice,
+							call.Xx,
+							call.OnlineDeadline,
+							call.MaxOnlineTime,
+						)
+						node.Lock()
+						node.Busy = false
+						node.Unlock()
+					}
+				}
+			}(node, i, call)
+		}
+	}
+	wg.Wait()
+}
return
}
- cfgRaw, err := ioutil.ReadFile(*cfgPath)
+ cfgRaw, err := ioutil.ReadFile(nncp.CfgPathFromEnv(cfgPath))
if err != nil {
log.Fatalln("Can not read config:", err)
}
}
nice := uint8(*niceRaw)
- cfgRaw, err := ioutil.ReadFile(*cfgPath)
+ cfgRaw, err := ioutil.ReadFile(nncp.CfgPathFromEnv(cfgPath))
if err != nil {
log.Fatalln("Can not read config:", err)
}
go func(conn net.Conn) {
state, err := ctx.StartR(conn, nice, nil)
if err == nil {
- ctx.LogI("call-start", nncp.SDS{"node": state.NodeId}, "connected")
+ ctx.LogI("call-start", nncp.SDS{"node": state.Node.Id}, "connected")
state.Wait()
ctx.LogI("call-finish", nncp.SDS{
- "node": state.NodeId,
+ "node": state.Node.Id,
"duration": strconv.FormatInt(int64(state.Duration.Seconds()), 10),
"rxbytes": strconv.FormatInt(state.RxBytes, 10),
"txbytes": strconv.FormatInt(state.TxBytes, 10),
"txspeed": strconv.FormatInt(state.TxSpeed, 10),
}, "")
} else {
- var nodeId string
- if state == nil {
- nodeId = "unknown"
- } else {
- nodeId = state.NodeId.String()
+ nodeId := "unknown"
+ if state != nil && state.Node != nil {
+ nodeId = state.Node.Id.String()
}
ctx.LogE("call-start", nncp.SDS{"node": nodeId, "err": err}, "")
}
var (
cfgPath = flag.String("cfg", nncp.DefaultCfgPath, "Path to configuration file")
niceRaw = flag.Int("nice", nncp.DefaultNiceMail, "Outbound packet niceness")
+ minSize = flag.Uint64("minsize", 0, "Minimal required resulting packet size")
quiet = flag.Bool("quiet", false, "Print only errors")
debug = flag.Bool("debug", false, "Print debug messages")
version = flag.Bool("version", false, "Print version information")
}
nice := uint8(*niceRaw)
- cfgRaw, err := ioutil.ReadFile(*cfgPath)
+ cfgRaw, err := ioutil.ReadFile(nncp.CfgPathFromEnv(cfgPath))
if err != nil {
log.Fatalln("Can not read config:", err)
}
log.Fatalln("Invalid NODE specified:", err)
}
- if err = ctx.TxFile(node, nice, flag.Arg(0), splitted[1]); err != nil {
+ if err = ctx.TxFile(node, nice, flag.Arg(0), splitted[1], int64(*minSize)); err != nil {
log.Fatalln(err)
}
}
var (
cfgPath = flag.String("cfg", nncp.DefaultCfgPath, "Path to configuration file")
niceRaw = flag.Int("nice", nncp.DefaultNiceMail, "Outbound packet niceness")
+ minSize = flag.Uint64("minsize", 0, "Minimal required resulting packet size")
quiet = flag.Bool("quiet", false, "Print only errors")
debug = flag.Bool("debug", false, "Print debug messages")
version = flag.Bool("version", false, "Print version information")
}
nice := uint8(*niceRaw)
- cfgRaw, err := ioutil.ReadFile(*cfgPath)
+ cfgRaw, err := ioutil.ReadFile(nncp.CfgPathFromEnv(cfgPath))
if err != nil {
log.Fatalln("Can not read config:", err)
}
log.Fatalln("Invalid NODE specified:", err)
}
- if err = ctx.TxFreq(node, nice, splitted[1], flag.Arg(1)); err != nil {
+ if err = ctx.TxFreq(node, nice, splitted[1], flag.Arg(1), int64(*minSize)); err != nil {
log.Fatalln(err)
}
}
return
}
- cfgRaw, err := ioutil.ReadFile(*cfgPath)
+ cfgRaw, err := ioutil.ReadFile(nncp.CfgPathFromEnv(cfgPath))
if err != nil {
log.Fatalln("Can not read config:", err)
}
fmt.Fprintln(os.Stderr, "nncp-mail -- send email\n")
fmt.Fprintf(os.Stderr, "Usage: %s [options] NODE USER ...\nOptions:\n", os.Args[0])
flag.PrintDefaults()
- fmt.Fprintln(os.Stderr, "Email body is read from stdin.")
}
func main() {
var (
cfgPath = flag.String("cfg", nncp.DefaultCfgPath, "Path to configuration file")
niceRaw = flag.Int("nice", nncp.DefaultNiceMail, "Outbound packet niceness")
+ minSize = flag.Uint64("minsize", 0, "Minimal required resulting packet size")
quiet = flag.Bool("quiet", false, "Print only errors")
debug = flag.Bool("debug", false, "Print debug messages")
version = flag.Bool("version", false, "Print version information")
}
nice := uint8(*niceRaw)
- cfgRaw, err := ioutil.ReadFile(*cfgPath)
+ cfgRaw, err := ioutil.ReadFile(nncp.CfgPathFromEnv(cfgPath))
if err != nil {
log.Fatalln("Can not read config:", err)
}
log.Fatalln("Can not read mail body from stdin:", err)
}
- if err = ctx.TxMail(node, nice, strings.Join(flag.Args()[1:], " "), body); err != nil {
+ if err = ctx.TxMail(node, nice, strings.Join(flag.Args()[1:], " "), body, int64(*minSize)); err != nil {
log.Fatalln(err)
}
}
if err != nil {
panic(err)
}
- incoming := "/path/to/upload/dir, omit it to forbid uploading"
- freq := "/path/to/freq/able/dir, omit to forbid freqing"
+ noisePub := nncp.ToBase32(nodeOur.NoisePub[:])
cfg := nncp.CfgYAML{
Self: nncp.NodeOurYAML{
Id: nodeOur.Id.String(),
Id: nodeOur.Id.String(),
ExchPub: nncp.ToBase32(nodeOur.ExchPub[:]),
SignPub: nncp.ToBase32(nodeOur.SignPub[:]),
- NoisePub: nncp.ToBase32(nodeOur.NoisePub[:]),
+ NoisePub: &noisePub,
Sendmail: []string{nncp.DefaultSendmailPath},
- Incoming: &incoming,
- Freq: &freq,
- Addrs: map[string]string{"main": "localhost:5400"},
- },
- },
- Spool: "/path/to/spool",
- Log: "/path/to/log.file",
- Notify: &nncp.NotifyYAML{
- File: &nncp.FromToYAML{
- From: "nncp@localhost",
- To: "root@localhost, delete section to disable notifies",
- },
- Freq: &nncp.FromToYAML{
- From: "nncp@localhost",
- To: "root@localhost, delete section to disable notifies",
},
},
+ Spool: nncp.DefaultSpoolPath,
+ Log: nncp.DefaultLogPath,
}
raw, err := yaml.Marshal(&cfg)
if err != nil {
import (
"bufio"
"bytes"
+ "compress/zlib"
"flag"
"fmt"
"io"
"cypherpunks.ru/nncp"
"github.com/davecgh/go-xdr/xdr2"
- "github.com/dustin/go-humanize"
"golang.org/x/crypto/blake2b"
)
func main() {
var (
- dump = flag.Bool("dump", false, "Write decrypted/parsed payload to stdout")
- cfgPath = flag.String("cfg", nncp.DefaultCfgPath, "Path to configuration file")
- version = flag.Bool("version", false, "Print version information")
- warranty = flag.Bool("warranty", false, "Print warranty information")
+ dump = flag.Bool("dump", false, "Write decrypted/parsed payload to stdout")
+ decompress = flag.Bool("decompress", false, "Try to zlib decompress dumped data")
+ cfgPath = flag.String("cfg", nncp.DefaultCfgPath, "Path to configuration file")
+ version = flag.Bool("version", false, "Print version information")
+ warranty = flag.Bool("warranty", false, "Print warranty information")
)
flag.Usage = usage
flag.Parse()
}
var err error
- beginning := make([]byte, nncp.PktOverhead-blake2b.Size256)
+ beginning := make([]byte, nncp.PktOverhead-8-2*blake2b.Size256)
if _, err = io.ReadFull(os.Stdin, beginning); err != nil {
log.Fatalln("Not enough data to read")
}
if err == nil && pkt.Magic == nncp.MagicNNCPPv1 {
if *dump {
bufW := bufio.NewWriter(os.Stdout)
- if _, err = io.Copy(bufW, bufio.NewReader(os.Stdin)); err != nil {
+ var r io.Reader
+ r = bufio.NewReader(os.Stdin)
+ if *decompress {
+ decompressor, err := zlib.NewReader(r)
+ if err != nil {
+ log.Fatalln(err)
+ }
+ r = decompressor
+ }
+ if _, err = io.Copy(bufW, r); err != nil {
log.Fatalln(err)
}
if err = bufW.Flush(); err != nil {
_, err = xdr.Unmarshal(bytes.NewReader(beginning), &pktEnc)
if err == nil && pktEnc.Magic == nncp.MagicNNCPEv1 {
if *dump {
- cfgRaw, err := ioutil.ReadFile(*cfgPath)
+ cfgRaw, err := ioutil.ReadFile(nncp.CfgPathFromEnv(cfgPath))
if err != nil {
log.Fatalln("Can not read config:", err)
}
log.Fatalln("Can not parse config:", err)
}
bufW := bufio.NewWriter(os.Stdout)
- if _, err = nncp.PktEncRead(
+ if _, _, err = nncp.PktEncRead(
ctx.Self,
ctx.Neigh,
io.MultiReader(
return
}
fmt.Printf(
- "Packet type: encrypted\nNiceness: %d\nSender: %s\nPayload size: %s (%d bytes)\n",
- pktEnc.Nice, pktEnc.Sender, humanize.IBytes(pktEnc.Size), pktEnc.Size,
+ "Packet type: encrypted\nNiceness: %d\nSender: %s\n",
+ pktEnc.Nice, pktEnc.Sender,
)
return
}
return
}
- cfgRaw, err := ioutil.ReadFile(*cfgPath)
+ cfgRaw, err := ioutil.ReadFile(nncp.CfgPathFromEnv(cfgPath))
if err != nil {
log.Fatalln("Can not read config:", err)
}
"io/ioutil"
"log"
"os"
+ "time"
"cypherpunks.ru/nncp"
)
nodeRaw = flag.String("node", "", "Process only that node")
niceRaw = flag.Int("nice", 255, "Minimal required niceness")
dryRun = flag.Bool("dryrun", false, "Do not actually write any tossed data")
+ cycle = flag.Uint("cycle", 0, "Repeat tossing after N seconds in infinite loop")
quiet = flag.Bool("quiet", false, "Print only errors")
debug = flag.Bool("debug", false, "Print debug messages")
version = flag.Bool("version", false, "Print version information")
}
nice := uint8(*niceRaw)
- cfgRaw, err := ioutil.ReadFile(*cfgPath)
+ cfgRaw, err := ioutil.ReadFile(nncp.CfgPathFromEnv(cfgPath))
if err != nil {
log.Fatalln("Can not read config:", err)
}
}
}
+Cycle:
isBad := false
for nodeId, node := range ctx.Neigh {
if nodeOnly != nil && nodeId != *nodeOnly.Id {
}
isBad = ctx.Toss(node.Id, nice, *dryRun)
}
+ if *cycle > 0 {
+ time.Sleep(time.Duration(*cycle) * time.Second)
+ goto Cycle
+ }
if isBad {
os.Exit(1)
}
log.Fatalln("-rx and -tx can not be set simultaneously")
}
- cfgRaw, err := ioutil.ReadFile(*cfgPath)
+ cfgRaw, err := ioutil.ReadFile(nncp.CfgPathFromEnv(cfgPath))
if err != nil {
log.Fatalln("Can not read config:", err)
}
fd.Close()
continue
}
- fd.Seek(0, 0)
+ fd.Seek(0, io.SeekStart)
tmp, err := ctx.NewTmpFileWHash()
if err != nil {
log.Fatalln(err)
package nncp
import (
+ "io"
"os"
"path/filepath"
"strconv"
fd.Close()
continue
}
- fd.Seek(0, 0)
+ fd.Seek(0, io.SeekStart)
ctx.LogD("jobs", SDS{
"xx": string(xx),
"node": pktEnc.Sender,
import (
"crypto/rand"
"errors"
+ "sync"
"github.com/flynn/noise"
+ "github.com/gorhill/cronexpr"
"golang.org/x/crypto/blake2b"
"golang.org/x/crypto/ed25519"
"golang.org/x/crypto/nacl/box"
}
type Node struct {
- Name string
- Id *NodeId
- ExchPub *[32]byte
- SignPub ed25519.PublicKey
- NoisePub *[32]byte
- Sendmail []string
- Incoming *string
- Freq *string
- Via []*NodeId
- Addrs map[string]string
+ Name string
+ Id *NodeId
+ ExchPub *[32]byte
+ SignPub ed25519.PublicKey
+ NoisePub *[32]byte
+ Sendmail []string
+ Incoming *string
+ Freq *string
+ Via []*NodeId
+ Addrs map[string]string
+ OnlineDeadline uint
+ MaxOnlineTime uint
+ Calls []*Call
+
+ Busy bool
+ sync.Mutex
}
type NodeOur struct {
NoisePrv *[32]byte
}
+type Call struct {
+ Cron *cronexpr.Expression
+ Nice uint8
+ Xx *TRxTx
+ Addr *string
+ OnlineDeadline uint
+ MaxOnlineTime uint
+}
+
func NewNodeGenerate() (*NodeOur, error) {
exchPub, exchPrv, err := box.GenerateKey(rand.Reader)
if err != nil {
)
var (
- MagicNNCPPv1 [8]byte = [8]byte{'N', 'N', 'C', 'P', 'P', 1, 0, 0}
- MagicNNCPEv1 [8]byte = [8]byte{'N', 'N', 'C', 'P', 'E', 1, 0, 0}
+ MagicNNCPPv1 [8]byte = [8]byte{'N', 'N', 'C', 'P', 'P', 0, 0, 1}
+ MagicNNCPEv1 [8]byte = [8]byte{'N', 'N', 'C', 'P', 'E', 0, 0, 1}
BadMagic error = errors.New("Unknown magic number")
BadPktType error = errors.New("Unknown packet type")
Recipient *NodeId
Sender *NodeId
ExchPub *[32]byte
- Size uint64
}
type PktEnc struct {
Sender *NodeId
ExchPub *[32]byte
Sign *[ed25519.SignatureSize]byte
- Size uint64
}
func init() {
if err != nil {
panic(err)
}
- PktOverhead = int64(n) + blake2b.Size256
+ PktOverhead = 8 + blake2b.Size256 + int64(n) + blake2b.Size256
buf.Reset()
dummyId, err := NodeIdFromString("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA")
Sender: dummyId,
ExchPub: new([32]byte),
Sign: new([ed25519.SignatureSize]byte),
- Size: 123,
}
n, err = xdr.Marshal(&buf, pktEnc)
if err != nil {
return h
}
-func PktEncWrite(our *NodeOur, their *Node, pkt *Pkt, nice uint8, size int64, data io.Reader, out io.Writer) error {
+// DevZero is an io.Reader analogue of /dev/zero: Read fills the whole
+// supplied buffer with zero bytes and never returns an error. It is used
+// as an endless data source for packet padding.
+type DevZero struct{}
+
+func (d DevZero) Read(b []byte) (n int, err error) {
+	for n = 0; n < len(b); n++ {
+		b[n] = 0
+	}
+	return
+}
+
+func PktEncWrite(our *NodeOur, their *Node, pkt *Pkt, nice uint8, size, padSize int64, data io.Reader, out io.Writer) error {
pubEph, prvEph, err := box.GenerateKey(rand.Reader)
if err != nil {
return err
Recipient: their.Id,
Sender: our.Id,
ExchPub: pubEph,
- Size: uint64(size + PktOverhead),
}
var tbsBuf bytes.Buffer
if _, err = xdr.Marshal(&tbsBuf, &tbs); err != nil {
Sender: our.Id,
ExchPub: pubEph,
Sign: signature,
- Size: tbs.Size,
}
if _, err = xdr.Marshal(out, &pktEnc); err != nil {
return err
sharedKey := new([32]byte)
curve25519.ScalarMult(sharedKey, prvEph, their.ExchPub)
kdf := hkdf.New(blake256, sharedKey[:], nil, MagicNNCPEv1[:])
+
keyEnc := make([]byte, 32)
if _, err = io.ReadFull(kdf, keyEnc); err != nil {
return err
if _, err = io.ReadFull(kdf, keyAuth); err != nil {
return err
}
+
ciph, err := twofish.NewCipher(keyEnc)
if err != nil {
return err
if err != nil {
return err
}
+
mw := io.MultiWriter(out, mac)
ae := &cipher.StreamWriter{S: ctr, W: mw}
+ usize := uint64(size)
+ if _, err = xdr.Marshal(ae, &usize); err != nil {
+ return err
+ }
+ ae.Close()
+ out.Write(mac.Sum(nil))
+
+ if _, err = io.ReadFull(kdf, keyEnc); err != nil {
+ return err
+ }
+ if _, err = io.ReadFull(kdf, keyAuth); err != nil {
+ return err
+ }
+
+ ciph, err = twofish.NewCipher(keyEnc)
+ if err != nil {
+ return err
+ }
+ ctr = cipher.NewCTR(ciph, make([]byte, twofish.BlockSize))
+ mac, err = blake2b.New256(keyAuth)
+ if err != nil {
+ return err
+ }
+
+ mw = io.MultiWriter(out, mac)
+ ae = &cipher.StreamWriter{S: ctr, W: mw}
ae.Write(pktBuf.Bytes())
- if _, err = io.CopyN(ae, data, int64(size)); err != nil {
+ if _, err = io.CopyN(ae, data, size); err != nil {
return err
}
ae.Close()
out.Write(mac.Sum(nil))
+
+ if padSize > 0 {
+ if _, err = io.ReadFull(kdf, keyEnc); err != nil {
+ return err
+ }
+ ciph, err = twofish.NewCipher(keyEnc)
+ if err != nil {
+ return err
+ }
+ ctr = cipher.NewCTR(ciph, make([]byte, twofish.BlockSize))
+ ae = &cipher.StreamWriter{S: ctr, W: out}
+ if _, err = io.CopyN(ae, DevZero{}, padSize); err != nil {
+ return err
+ }
+ ae.Close()
+ }
return nil
}
Recipient: our.Id,
Sender: their.Id,
ExchPub: pktEnc.ExchPub,
- Size: pktEnc.Size,
}
var tbsBuf bytes.Buffer
if _, err := xdr.Marshal(&tbsBuf, &tbs); err != nil {
return ed25519.Verify(their.SignPub, tbsBuf.Bytes(), pktEnc.Sign[:]), nil
}
-func PktEncRead(our *NodeOur, nodes map[NodeId]*Node, data io.Reader, out io.Writer) (*Node, error) {
+func PktEncRead(our *NodeOur, nodes map[NodeId]*Node, data io.Reader, out io.Writer) (*Node, int64, error) {
var pktEnc PktEnc
_, err := xdr.Unmarshal(data, &pktEnc)
if err != nil {
- return nil, err
+ return nil, 0, err
}
if pktEnc.Magic != MagicNNCPEv1 {
- return nil, BadMagic
+ return nil, 0, BadMagic
}
their, known := nodes[*pktEnc.Sender]
if !known {
- return nil, errors.New("Unknown sender")
+ return nil, 0, errors.New("Unknown sender")
}
verified, err := TbsVerify(our, their, &pktEnc)
if err != nil {
- return nil, err
+ return nil, 0, err
}
if !verified {
- return their, errors.New("Invalid signature")
+ return their, 0, errors.New("Invalid signature")
}
sharedKey := new([32]byte)
curve25519.ScalarMult(sharedKey, our.ExchPrv, pktEnc.ExchPub)
kdf := hkdf.New(blake256, sharedKey[:], nil, MagicNNCPEv1[:])
+
keyEnc := make([]byte, 32)
if _, err = io.ReadFull(kdf, keyEnc); err != nil {
- return their, err
+ return their, 0, err
}
keyAuth := make([]byte, 64)
if _, err = io.ReadFull(kdf, keyAuth); err != nil {
- return their, err
+ return their, 0, err
}
+
ciph, err := twofish.NewCipher(keyEnc)
if err != nil {
- return their, err
+ return their, 0, err
}
ctr := cipher.NewCTR(ciph, make([]byte, twofish.BlockSize))
mac, err := blake2b.New256(keyAuth)
if err != nil {
- return their, err
+ return their, 0, err
}
- trA := io.TeeReader(data, mac)
- ae := &cipher.StreamReader{S: ctr, R: trA}
- if _, err = io.CopyN(out, ae, int64(pktEnc.Size)-blake2b.Size256); err != nil {
- return their, err
+
+ tr := io.TeeReader(data, mac)
+ ae := &cipher.StreamReader{S: ctr, R: tr}
+ var usize uint64
+ if _, err = xdr.Unmarshal(ae, &usize); err != nil {
+ return their, 0, err
}
tag := make([]byte, blake2b.Size256)
if _, err = io.ReadFull(data, tag); err != nil {
- return their, err
+ return their, 0, err
+ }
+ if subtle.ConstantTimeCompare(mac.Sum(nil), tag) != 1 {
+ return their, 0, errors.New("Unauthenticated size")
+ }
+ size := int64(usize)
+
+ if _, err = io.ReadFull(kdf, keyEnc); err != nil {
+ return their, size, err
+ }
+ if _, err = io.ReadFull(kdf, keyAuth); err != nil {
+ return their, size, err
+ }
+
+ ciph, err = twofish.NewCipher(keyEnc)
+ if err != nil {
+ return their, size, err
+ }
+ ctr = cipher.NewCTR(ciph, make([]byte, twofish.BlockSize))
+ mac, err = blake2b.New256(keyAuth)
+ if err != nil {
+ return their, size, err
+ }
+
+ tr = io.TeeReader(data, mac)
+ ae = &cipher.StreamReader{S: ctr, R: tr}
+ if _, err = io.CopyN(out, ae, PktOverhead+size-8-blake2b.Size256-blake2b.Size256); err != nil {
+ return their, size, err
+ }
+ if _, err = io.ReadFull(data, tag); err != nil {
+ return their, size, err
}
if subtle.ConstantTimeCompare(mac.Sum(nil), tag) != 1 {
- return their, errors.New("Unauthenticated payload")
+ return their, size, errors.New("Unauthenticated payload")
}
- return their, nil
+ return their, size, nil
}
if err != nil {
panic(err)
}
- f := func(path string, pathSize uint8, data [1 << 16]byte, size uint16) bool {
+ f := func(path string, pathSize uint8, data [1 << 16]byte, size, padSize uint16) bool {
dataR := bytes.NewReader(data[:])
var ct bytes.Buffer
if len(path) > int(pathSize) {
if err != nil {
panic(err)
}
- err = PktEncWrite(nodeOur, nodeTheir.Their(), pkt, 123, int64(size), dataR, &ct)
+ err = PktEncWrite(
+ nodeOur,
+ nodeTheir.Their(),
+ pkt,
+ 123,
+ int64(size),
+ int64(padSize),
+ dataR,
+ &ct,
+ )
if err != nil {
return false
}
if *pktEnc.Sender != *nodeOur.Id {
return false
}
- if pktEnc.Size != uint64(ct.Len()) {
- return false
- }
return true
}
if err := quick.Check(f, nil); err != nil {
if err != nil {
panic(err)
}
- f := func(path string, pathSize uint8, data [1 << 16]byte, size uint16) bool {
+ f := func(path string, pathSize uint8, data [1 << 16]byte, size, padSize uint16, junk []byte) bool {
dataR := bytes.NewReader(data[:])
var ct bytes.Buffer
if len(path) > int(pathSize) {
if err != nil {
panic(err)
}
- err = PktEncWrite(node1, node2.Their(), pkt, 123, int64(size), dataR, &ct)
+ err = PktEncWrite(
+ node1,
+ node2.Their(),
+ pkt,
+ 123,
+ int64(size),
+ int64(padSize),
+ dataR,
+ &ct,
+ )
if err != nil {
return false
}
+ ct.Write(junk)
var pt bytes.Buffer
nodes := make(map[NodeId]*Node)
nodes[*node1.Id] = node1.Their()
- node, err := PktEncRead(node2, nodes, &ct, &pt)
+ node, sizeGot, err := PktEncRead(node2, nodes, &ct, &pt)
if err != nil {
return false
}
if *node.Id != *node1.Id {
return false
}
+ if sizeGot != int64(size) {
+ return false
+ }
var pktBuf bytes.Buffer
xdr.Marshal(&pktBuf, &pkt)
return bytes.Compare(pt.Bytes(), append(pktBuf.Bytes(), data[:int(size)]...)) == 0
)
const (
- MaxSPSize = 2<<16 - 256
- PartSuffix = ".part"
- DeadlineDuration = 10
+ MaxSPSize = 1<<16 - 256
+ PartSuffix = ".part"
+ DefaultDeadline = 10
)
var (
- MagicNNCPLv1 [8]byte = [8]byte{'N', 'N', 'C', 'P', 'S', 1, 0, 0}
+ MagicNNCPLv1 [8]byte = [8]byte{'N', 'N', 'C', 'P', 'S', 0, 0, 1}
SPHeadOverhead int
SPInfoOverhead int
}
type SPState struct {
- ctx *Ctx
- NodeId *NodeId
- nice uint8
- hs *noise.HandshakeState
- csOur *noise.CipherState
- csTheir *noise.CipherState
- payloads chan []byte
- infosTheir map[[32]byte]*SPInfo
- queueTheir []*SPFreq
- wg sync.WaitGroup
- RxBytes int64
- RxLastSeen time.Time
- TxBytes int64
- TxLastSeen time.Time
- started time.Time
- Duration time.Duration
- RxSpeed int64
- TxSpeed int64
- rxLock *os.File
- txLock *os.File
- xxOnly *TRxTx
+ ctx *Ctx
+ Node *Node
+ onlineDeadline uint
+ maxOnlineTime uint
+ nice uint8
+ hs *noise.HandshakeState
+ csOur *noise.CipherState
+ csTheir *noise.CipherState
+ payloads chan []byte
+ infosTheir map[[32]byte]*SPInfo
+ infosOurSeen map[[32]byte]struct{}
+ queueTheir []*SPFreq
+ wg sync.WaitGroup
+ RxBytes int64
+ RxLastSeen time.Time
+ TxBytes int64
+ TxLastSeen time.Time
+ started time.Time
+ Duration time.Duration
+ RxSpeed int64
+ TxSpeed int64
+ rxLock *os.File
+ txLock *os.File
+ xxOnly *TRxTx
+ isDead bool
sync.RWMutex
}
-func (state *SPState) isDead() bool {
+// NotAlive reports whether the session should be torn down: either it was
+// explicitly marked dead, its maximal allowed online time (if non-zero)
+// has elapsed since the session started, or no traffic has been seen in
+// either direction for onlineDeadline seconds.
+func (state *SPState) NotAlive() bool {
+	if state.isDead {
+		return true
+	}
	now := time.Now()
-	return now.Sub(state.RxLastSeen).Seconds() >= DeadlineDuration && now.Sub(state.TxLastSeen).Seconds() >= DeadlineDuration
+	if state.maxOnlineTime > 0 && state.started.Add(time.Duration(state.maxOnlineTime)*time.Second).Before(now) {
+		return true
+	}
+	return uint(now.Sub(state.RxLastSeen).Seconds()) >= state.onlineDeadline && uint(now.Sub(state.TxLastSeen).Seconds()) >= state.onlineDeadline
}
func (state *SPState) dirUnlock() {
func (state *SPState) ReadSP(src io.Reader) ([]byte, error) {
var sp SPRaw
- n, err := xdr.UnmarshalLimited(src, &sp, 2<<17)
+ n, err := xdr.UnmarshalLimited(src, &sp, 1<<17)
if err != nil {
return nil, err
}
return sp.Payload, nil
}
-func (ctx *Ctx) infosOur(nodeId *NodeId, nice uint8) [][]byte {
+func (ctx *Ctx) infosOur(nodeId *NodeId, nice uint8, seen *map[[32]byte]struct{}) [][]byte {
var infos []*SPInfo
var totalSize int64
for job := range ctx.Jobs(nodeId, TTx) {
if job.PktEnc.Nice > nice {
continue
}
+ if _, known := (*seen)[*job.HshValue]; known {
+ continue
+ }
totalSize += job.Size
infos = append(infos, &SPInfo{
Nice: job.PktEnc.Nice,
Size: uint64(job.Size),
Hash: job.HshValue,
})
+ (*seen)[*job.HshValue] = struct{}{}
}
sort.Sort(ByNice(infos))
var payloads [][]byte
"size": strconv.FormatInt(int64(info.Size), 10),
}, "")
}
- ctx.LogI("sp-infos", SDS{
- "xx": string(TTx),
- "node": nodeId,
- "pkts": strconv.Itoa(len(payloads)),
- "size": strconv.FormatInt(totalSize, 10),
- }, "")
+ if totalSize > 0 {
+ ctx.LogI("sp-infos", SDS{
+ "xx": string(TTx),
+ "node": nodeId,
+ "pkts": strconv.Itoa(len(payloads)),
+ "size": strconv.FormatInt(totalSize, 10),
+ }, "")
+ }
return payloadsSplit(payloads)
}
-func (ctx *Ctx) StartI(conn net.Conn, nodeId *NodeId, nice uint8, xxOnly *TRxTx) (*SPState, error) {
+func (ctx *Ctx) StartI(conn net.Conn, nodeId *NodeId, nice uint8, xxOnly *TRxTx, onlineDeadline, maxOnlineTime uint) (*SPState, error) {
err := ctx.ensureRxDir(nodeId)
if err != nil {
return nil, err
}
}
started := time.Now()
+ node := ctx.Neigh[*nodeId]
conf := noise.Config{
CipherSuite: NoiseCipherSuite,
Pattern: noise.HandshakeIK,
Private: ctx.Self.NoisePrv[:],
Public: ctx.Self.NoisePub[:],
},
- PeerStatic: ctx.Neigh[*nodeId].NoisePub[:],
+ PeerStatic: node.NoisePub[:],
}
state := SPState{
- ctx: ctx,
- hs: noise.NewHandshakeState(conf),
- NodeId: nodeId,
- nice: nice,
- payloads: make(chan []byte),
- infosTheir: make(map[[32]byte]*SPInfo),
- started: started,
- rxLock: rxLock,
- txLock: txLock,
- xxOnly: xxOnly,
+ ctx: ctx,
+ hs: noise.NewHandshakeState(conf),
+ Node: node,
+ onlineDeadline: onlineDeadline,
+ maxOnlineTime: maxOnlineTime,
+ nice: nice,
+ payloads: make(chan []byte),
+ infosTheir: make(map[[32]byte]*SPInfo),
+ infosOurSeen: make(map[[32]byte]struct{}),
+ started: started,
+ rxLock: rxLock,
+ txLock: txLock,
+ xxOnly: xxOnly,
}
var infosPayloads [][]byte
if xxOnly == nil || *xxOnly != TTx {
- infosPayloads = ctx.infosOur(nodeId, nice)
+ infosPayloads = ctx.infosOur(nodeId, nice, &state.infosOurSeen)
}
var firstPayload []byte
if len(infosPayloads) > 0 {
firstPayload = infosPayloads[0]
}
- // Pad first payload, to hide actual existing files
+ // Pad first payload, to hide actual number of existing files
for i := 0; i < (MaxSPSize-len(firstPayload))/SPHeadOverhead; i++ {
firstPayload = append(firstPayload, SPHaltMarshalized...)
}
buf, _, _ = state.hs.WriteMessage(nil, firstPayload)
sds := SDS{"node": nodeId, "nice": strconv.Itoa(int(nice))}
ctx.LogD("sp-start", sds, "sending first message")
- conn.SetWriteDeadline(time.Now().Add(DeadlineDuration * time.Second))
+ conn.SetWriteDeadline(time.Now().Add(DefaultDeadline * time.Second))
if err = state.WriteSP(conn, buf); err != nil {
ctx.LogE("sp-start", SdsAdd(sds, SDS{"err": err}), "")
state.dirUnlock()
return nil, err
}
ctx.LogD("sp-start", sds, "waiting for first message")
- conn.SetReadDeadline(time.Now().Add(DeadlineDuration * time.Second))
+ conn.SetReadDeadline(time.Now().Add(DefaultDeadline * time.Second))
if buf, err = state.ReadSP(conn); err != nil {
ctx.LogE("sp-start", SdsAdd(sds, SDS{"err": err}), "")
state.dirUnlock()
},
}
state := SPState{
- ctx: ctx,
- hs: noise.NewHandshakeState(conf),
- nice: nice,
- payloads: make(chan []byte),
- infosTheir: make(map[[32]byte]*SPInfo),
- started: started,
- xxOnly: xxOnly,
+ ctx: ctx,
+ hs: noise.NewHandshakeState(conf),
+ nice: nice,
+ payloads: make(chan []byte),
+ infosOurSeen: make(map[[32]byte]struct{}),
+ infosTheir: make(map[[32]byte]*SPInfo),
+ started: started,
+ xxOnly: xxOnly,
}
var buf []byte
var payload []byte
SDS{"nice": strconv.Itoa(int(nice))},
"waiting for first message",
)
- conn.SetReadDeadline(time.Now().Add(DeadlineDuration * time.Second))
+ conn.SetReadDeadline(time.Now().Add(DefaultDeadline * time.Second))
if buf, err = state.ReadSP(conn); err != nil {
ctx.LogE("sp-start", SDS{"err": err}, "")
return nil, err
return nil, err
}
- var nodeId *NodeId
- for _, node := range ctx.Neigh {
+ var node *Node
+ for _, node = range ctx.Neigh {
if subtle.ConstantTimeCompare(state.hs.PeerStatic(), node.NoisePub[:]) == 1 {
- nodeId = node.Id
break
}
}
- if nodeId == nil {
+	if node == nil || subtle.ConstantTimeCompare(state.hs.PeerStatic(), node.NoisePub[:]) != 1 {
peerId := ToBase32(state.hs.PeerStatic())
ctx.LogE("sp-start", SDS{"peer": peerId}, "unknown")
return nil, errors.New("Unknown peer: " + peerId)
}
- state.NodeId = nodeId
- sds := SDS{"node": nodeId, "nice": strconv.Itoa(int(nice))}
+ state.Node = node
+ state.onlineDeadline = node.OnlineDeadline
+ state.maxOnlineTime = node.MaxOnlineTime
+ sds := SDS{"node": node.Id, "nice": strconv.Itoa(int(nice))}
- if ctx.ensureRxDir(nodeId); err != nil {
+	if err = ctx.ensureRxDir(node.Id); err != nil {
return nil, err
}
var rxLock *os.File
if xxOnly != nil && *xxOnly == TRx {
- rxLock, err = ctx.LockDir(nodeId, TRx)
+ rxLock, err = ctx.LockDir(node.Id, TRx)
if err != nil {
return nil, err
}
state.rxLock = rxLock
var txLock *os.File
if xxOnly != nil && *xxOnly == TTx {
- txLock, err = ctx.LockDir(nodeId, TTx)
+ txLock, err = ctx.LockDir(node.Id, TTx)
if err != nil {
return nil, err
}
var infosPayloads [][]byte
if xxOnly == nil || *xxOnly != TTx {
- infosPayloads = ctx.infosOur(nodeId, nice)
+ infosPayloads = ctx.infosOur(node.Id, nice, &state.infosOurSeen)
}
var firstPayload []byte
if len(infosPayloads) > 0 {
firstPayload = infosPayloads[0]
}
- // Pad first payload, to hide actual existing files
+ // Pad first payload, to hide actual number of existing files
for i := 0; i < (MaxSPSize-len(firstPayload))/SPHeadOverhead; i++ {
firstPayload = append(firstPayload, SPHaltMarshalized...)
}
ctx.LogD("sp-start", sds, "sending first message")
buf, state.csTheir, state.csOur = state.hs.WriteMessage(nil, firstPayload)
- conn.SetWriteDeadline(time.Now().Add(DeadlineDuration * time.Second))
+ conn.SetWriteDeadline(time.Now().Add(DefaultDeadline * time.Second))
if err = state.WriteSP(conn, buf); err != nil {
ctx.LogE("sp-start", SdsAdd(sds, SDS{"err": err}), "")
state.dirUnlock()
}
func (state *SPState) StartWorkers(conn net.Conn, infosPayloads [][]byte, payload []byte) error {
- sds := SDS{"node": state.NodeId, "nice": strconv.Itoa(int(state.nice))}
+ sds := SDS{"node": state.Node.Id, "nice": strconv.Itoa(int(state.nice))}
if len(infosPayloads) > 1 {
go func() {
for _, payload := range infosPayloads[1:] {
state.ctx.LogE("sp-work", SdsAdd(sds, SDS{"err": err}), "")
return err
}
+
go func() {
for _, reply := range replies {
state.ctx.LogD(
state.payloads <- reply
}
}()
+
+	go func() {
+		for ; !state.NotAlive(); time.Sleep(time.Second) {
+			for _, payload := range state.ctx.infosOur(
+				state.Node.Id,
+				state.nice,
+				&state.infosOurSeen,
+			) {
+				state.ctx.LogD(
+					"sp-work",
+					SdsAdd(sds, SDS{"size": strconv.Itoa(len(payload))}),
+					"queuing new info",
+				)
+				state.payloads <- payload
+			}
+		}
+	}()
+
state.wg.Add(1)
go func() {
- defer state.wg.Done()
+ defer func() {
+ state.isDead = true
+ state.wg.Done()
+ }()
for {
- if state.isDead() {
+ if state.NotAlive() {
return
}
var payload []byte
state.ctx.LogD("sp-file", sdsp, "queueing")
fd, err := os.Open(filepath.Join(
state.ctx.Spool,
- state.NodeId.String(),
+ state.Node.Id.String(),
string(TTx),
ToBase32(freq.Hash[:]),
))
var buf []byte
if freq.Offset < fullSize {
state.ctx.LogD("sp-file", sdsp, "seeking")
- if _, err = fd.Seek(int64(freq.Offset), 0); err != nil {
+ if _, err = fd.Seek(int64(freq.Offset), io.SeekStart); err != nil {
state.ctx.LogE("sp-file", SdsAdd(sdsp, SDS{"err": err}), "")
break
}
SdsAdd(sds, SDS{"size": strconv.Itoa(len(payload))}),
"sending",
)
- conn.SetWriteDeadline(time.Now().Add(DeadlineDuration * time.Second))
+ conn.SetWriteDeadline(time.Now().Add(10 * time.Second))
if err := state.WriteSP(conn, state.csOur.Encrypt(nil, nil, payload)); err != nil {
state.ctx.LogE("sp-xmit", SdsAdd(sds, SDS{"err": err}), "")
break
}
}
}()
+
state.wg.Add(1)
go func() {
- defer state.wg.Done()
+ defer func() {
+ state.isDead = true
+ state.wg.Done()
+ }()
for {
- if state.isDead() {
+ if state.NotAlive() {
return
}
state.ctx.LogD("sp-recv", sds, "waiting for payload")
- conn.SetReadDeadline(time.Now().Add(DeadlineDuration * time.Second))
+ conn.SetReadDeadline(time.Now().Add(DefaultDeadline * time.Second))
payload, err := state.ReadSP(conn)
if err != nil {
unmarshalErr := err.(*xdr.UnmarshalError)
netErr, ok := unmarshalErr.Err.(net.Error)
- if (ok && netErr.Timeout()) || unmarshalErr.ErrorCode == xdr.ErrIO {
+ if ok && netErr.Timeout() {
continue
- } else {
- state.ctx.LogE("sp-recv", SdsAdd(sds, SDS{"err": err}), "")
+ }
+ if unmarshalErr.ErrorCode == xdr.ErrIO {
break
}
+ state.ctx.LogE("sp-recv", SdsAdd(sds, SDS{"err": err}), "")
+ break
}
state.ctx.LogD(
"sp-recv",
}()
}
}()
+
return nil
}
}
func (state *SPState) ProcessSP(payload []byte) ([][]byte, error) {
- sds := SDS{"node": state.NodeId, "nice": strconv.Itoa(int(state.nice))}
+ sds := SDS{"node": state.Node.Id, "nice": strconv.Itoa(int(state.nice))}
r := bytes.NewReader(payload)
var err error
var replies [][]byte
state.ctx.LogD("sp-process", sdsp, "stating part")
if _, err = os.Stat(filepath.Join(
state.ctx.Spool,
- state.NodeId.String(),
+ state.Node.Id.String(),
string(TRx),
ToBase32(info.Hash[:]),
)); err == nil {
}
fi, err := os.Stat(filepath.Join(
state.ctx.Spool,
- state.NodeId.String(),
+ state.Node.Id.String(),
string(TRx),
ToBase32(info.Hash[:])+PartSuffix,
))
})
filePath := filepath.Join(
state.ctx.Spool,
- state.NodeId.String(),
+ state.Node.Id.String(),
string(TRx),
ToBase32(file.Hash[:]),
)
SdsAdd(sdsp, SDS{"offset": strconv.FormatInt(int64(file.Offset), 10)}),
"seeking",
)
- if _, err = fd.Seek(int64(file.Offset), 0); err != nil {
+ if _, err = fd.Seek(int64(file.Offset), io.SeekStart); err != nil {
state.ctx.LogE("sp-file", SdsAdd(sdsp, SDS{"err": err}), "")
fd.Close()
return nil, err
}
state.wg.Add(1)
defer state.wg.Done()
- fd.Seek(0, 0)
+ fd.Seek(0, io.SeekStart)
state.ctx.LogD("sp-file", sdsp, "checking")
gut, err := Check(fd, file.Hash[:])
fd.Close()
}
state.ctx.LogI("sp-done", SdsAdd(sdsp, SDS{"xx": string(TRx)}), "")
os.Rename(filePath+PartSuffix, filePath)
+ state.Lock()
+ delete(state.infosTheir, *file.Hash)
+ state.Unlock()
go func() {
state.payloads <- MarshalSP(SPTypeDone, SPDone{file.Hash})
}()
state.ctx.LogD("sp-done", sdsp, "removing")
err := os.Remove(filepath.Join(
state.ctx.Spool,
- state.NodeId.String(),
+ state.Node.Id.String(),
string(TTx),
ToBase32(done.Hash[:]),
))
}
state.ctx.LogI("sp-infos", SDS{
"xx": string(TRx),
- "node": state.NodeId,
+ "node": state.Node.Id,
"pkts": strconv.Itoa(pkts),
"size": strconv.FormatInt(int64(size), 10),
}, "")
errs := make(chan error, 1)
go func(job Job) {
pipeWB := bufio.NewWriter(pipeW)
- _, err := PktEncRead(
+ _, _, err := PktEncRead(
ctx.Self,
ctx.Neigh,
bufio.NewReader(job.Fd),
log.Fatalln(err)
}
sendmail := ctx.Neigh[*job.PktEnc.Sender].Sendmail
+ if len(sendmail) == 0 {
+ ctx.LogE("rx", SdsAdd(sds, SDS{"err": "No sendmail configured"}), "")
+ isBad = true
+ goto Closing
+ }
if !dryRun {
cmd := exec.Command(
sendmail[0],
goto Closing
}
if !dryRun {
- if err = ctx.TxFile(sender, job.PktEnc.Nice, filepath.Join(*freq, src), dst); err != nil {
+ if err = ctx.TxFile(sender, job.PktEnc.Nice, filepath.Join(*freq, src), dst, 0); err != nil {
ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "tx file")
isBad = true
goto Closing
DefaultNiceMail,
"recipient",
[]byte{123},
+ 1<<15,
); err != nil {
panic(err)
}
DefaultNiceFile,
src,
fileName,
+ 1<<15,
); err != nil {
panic(err)
}
DefaultNiceFile,
srcPath,
"samefile",
+ 1<<15,
); err != nil {
panic(err)
}
DefaultNiceFreq,
fileName,
fileName,
+ 1<<15,
); err != nil {
panic(err)
}
}
for job := range ctx.Jobs(ctx.Self.Id, TTx) {
var buf bytes.Buffer
- _, err := PktEncRead(ctx.Self, ctx.Neigh, job.Fd, &buf)
+ _, _, err := PktEncRead(ctx.Self, ctx.Neigh, job.Fd, &buf)
if err != nil {
panic(err)
}
&pktTrans,
123,
int64(len(data)),
+ 0,
bytes.NewReader(data),
&dst,
); err != nil {
"golang.org/x/crypto/blake2b"
)
-func (ctx *Ctx) Tx(node *Node, pkt *Pkt, nice uint8, size int64, src io.Reader) (*Node, error) {
+func (ctx *Ctx) Tx(node *Node, pkt *Pkt, nice uint8, size, minSize int64, src io.Reader) (*Node, error) {
tmp, err := ctx.NewTmpFileWHash()
if err != nil {
return nil, err
lastNode = ctx.Neigh[*node.Via[i-1]]
hops = append(hops, lastNode)
}
+ padSize := minSize - size - int64(len(hops))*(PktOverhead+PktEncOverhead)
+ if padSize < 0 {
+ padSize = 0
+ }
errs := make(chan error)
curSize := size
pipeR, pipeW := io.Pipe()
"nice": strconv.Itoa(int(nice)),
"size": strconv.FormatInt(size, 10),
}, "wrote")
- errs <- PktEncWrite(ctx.Self, hops[0], pkt, nice, size, src, dst)
+ errs <- PktEncWrite(ctx.Self, hops[0], pkt, nice, size, padSize, src, dst)
dst.Close()
}(curSize, src, pipeW)
+ curSize += padSize
var pipeRPrev io.Reader
for i := 1; i < len(hops); i++ {
"nice": strconv.Itoa(int(nice)),
"size": strconv.FormatInt(size, 10),
}, "trns wrote")
- errs <- PktEncWrite(ctx.Self, node, pkt, nice, size, src, dst)
+ errs <- PktEncWrite(ctx.Self, node, pkt, nice, size, 0, src, dst)
dst.Close()
}(hops[i], &pktTrans, curSize, pipeRPrev, pipeW)
}
return lastNode, err
}
-func (ctx *Ctx) TxFile(node *Node, nice uint8, srcPath, dstPath string) error {
+func (ctx *Ctx) TxFile(node *Node, nice uint8, srcPath, dstPath string, minSize int64) error {
if dstPath == "" {
dstPath = filepath.Base(srcPath)
}
if err != nil {
return err
}
- _, err = ctx.Tx(node, pkt, nice, srcStat.Size(), bufio.NewReader(src))
+ _, err = ctx.Tx(node, pkt, nice, srcStat.Size(), minSize, bufio.NewReader(src))
if err == nil {
ctx.LogI("tx", SDS{
"type": "file",
return err
}
-func (ctx *Ctx) TxFreq(node *Node, nice uint8, srcPath, dstPath string) error {
+func (ctx *Ctx) TxFreq(node *Node, nice uint8, srcPath, dstPath string, minSize int64) error {
dstPath = filepath.Clean(dstPath)
if filepath.IsAbs(dstPath) {
return errors.New("Relative destination path required")
}
src := strings.NewReader(dstPath)
size := int64(src.Len())
- _, err = ctx.Tx(node, pkt, nice, size, src)
+ _, err = ctx.Tx(node, pkt, nice, size, minSize, src)
if err == nil {
ctx.LogI("tx", SDS{
"type": "freq",
return err
}
-func (ctx *Ctx) TxMail(node *Node, nice uint8, recipient string, body []byte) error {
+func (ctx *Ctx) TxMail(node *Node, nice uint8, recipient string, body []byte, minSize int64) error {
pkt, err := NewPkt(PktTypeMail, recipient)
if err != nil {
return err
}
var compressed bytes.Buffer
- compressor := zlib.NewWriter(&compressed)
+ compressor, err := zlib.NewWriterLevel(&compressed, zlib.BestCompression)
+ if err != nil {
+ return err
+ }
if _, err = io.Copy(compressor, bytes.NewReader(body)); err != nil {
return err
}
compressor.Close()
size := int64(compressed.Len())
- _, err = ctx.Tx(node, pkt, nice, size, &compressed)
+ _, err = ctx.Tx(node, pkt, nice, size, minSize, &compressed)
if err == nil {
ctx.LogI("tx", SDS{
"type": "mail",
)
func TestTx(t *testing.T) {
- f := func(hops uint8, pathSrc, data string, nice uint8) bool {
+ f := func(hops uint8, pathSrc, data string, nice uint8, padSize int16) bool {
if len(pathSrc) > int(MaxPathSize) {
pathSrc = pathSrc[:MaxPathSize]
}
}
pkt, err := NewPkt(PktTypeMail, pathSrc)
src := strings.NewReader(data)
- dstNode, err := ctx.Tx(nodeTgt, pkt, 123, int64(src.Len()), src)
+ dstNode, err := ctx.Tx(
+ nodeTgt,
+ pkt,
+ 123,
+ int64(src.Len()),
+ int64(padSize),
+ src,
+ )
if err != nil {
return false
}
vias := append(nodeTgt.Via, nodeTgt.Id)
for i, hopId := range vias {
hopOur := privates[*hopId]
- foundNode, err := PktEncRead(hopOur, ctx.Neigh, &bufR, &bufW)
+ foundNode, _, err := PktEncRead(hopOur, ctx.Neigh, &bufR, &bufW)
if err != nil {
return false
}
--- /dev/null
+Subproject commit d520615e531a6bf3fb69406b9eba718261285ec8