X-Git-Url: http://www.git.cypherpunks.ru/?a=blobdiff_plain;f=src%2Fcypherpunks.ru%2Fgovpn%2Fpeer.go;h=b466161be989b38e0a7c1fce89211c87529e56dc;hb=bb60f10e8d825d49e635b840b5eb7512811256d9;hp=5ea245bbd44a02aa196c66801b3bd22b4c3f19f3;hpb=cecb63f12f4a9f523276a0c19c7feb7437c7f53a;p=govpn.git diff --git a/src/cypherpunks.ru/govpn/peer.go b/src/cypherpunks.ru/govpn/peer.go index 5ea245b..b466161 100644 --- a/src/cypherpunks.ru/govpn/peer.go +++ b/src/cypherpunks.ru/govpn/peer.go @@ -1,6 +1,6 @@ /* GoVPN -- simple secure free software virtual private network daemon -Copyright (C) 2014-2016 Sergey Matveev +Copyright (C) 2014-2017 Sergey Matveev This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -20,53 +20,81 @@ package govpn import ( "bytes" + "crypto/subtle" "encoding/binary" "io" - "log" "sync" "sync/atomic" "time" + "chacha20" + "github.com/Sirupsen/logrus" + "github.com/pkg/errors" + "golang.org/x/crypto/blake2b" "golang.org/x/crypto/poly1305" - "golang.org/x/crypto/salsa20" - "golang.org/x/crypto/xtea" ) const ( + // NonceSize is nonce size NonceSize = 8 - NonceBucketSize = 128 - TagSize = poly1305.TagSize - // S20BS is Salsa20's internal blocksize in bytes - S20BS = 64 - // Maximal amount of bytes transfered with single key (4 GiB) - MaxBytesPerKey int64 = 1 << 32 + nonceBucketSize = 256 + tagSize = poly1305.TagSize + CC20IBS = 64 + // MaxBytesPerKey is maximal amount of bytes transferred with single key (4 GiB) + MaxBytesPerKey uint64 = 1 << 32 // Heartbeat rate, relative to Timeout - TimeoutHeartbeat = 4 - // Minimal valid packet length + timeoutHeartbeat = 4 + // MinPktLength is minimal valid packet length MinPktLength = 1 + 16 + 8 - // Padding byte - PadByte = byte(0x80) + // padding byte + padByte = byte(0x80) + + logPrefixPeer = "peer_" ) -func newNonceCipher(key *[32]byte) *xtea.Cipher { - nonceKey := make([]byte, 16) - salsa20.XORKeyStream( - nonceKey, - make([]byte, 32), - make([]byte, xtea.BlockSize), - key, - ) - ciph, err := xtea.NewCipher(nonceKey) +func newNonces(key *[32]byte, i uint64) (chan *[NonceSize]byte, error) { + macKey := make([]byte, 32) + chacha20.XORKeyStream(macKey, make([]byte, 32), new([16]byte), key) + mac, err := blake2b.New256(macKey) if err != nil { panic(err) } - return ciph + sum := make([]byte, mac.Size()) + nonces := make(chan *[NonceSize]byte, nonceBucketSize*3) + go func() { + for { + buf := new([NonceSize]byte) + binary.BigEndian.PutUint64(buf[:], i) + mac.Write(buf[:]) + mac.Sum(sum[:0]) + copy(buf[:], sum) + nonces <- buf + mac.Reset() + i += 2 + } + }() + return nonces, nil } +// Peer is a GoVPN peer (client) type Peer struct { - Addr string - Id *PeerId - Conn io.Writer + // Statistics (they are at the beginning for correct int64 alignment) + BytesIn uint64 + BytesOut uint64 + BytesPayloadIn uint64 + BytesPayloadOut uint64 + FramesIn uint64 + FramesOut uint64 + FramesUnauth uint64 + FramesDup uint64 + HeartbeatRecv uint64 + HeartbeatSent uint64 + + // Basic + Addr string + ID *PeerID + Conn io.Writer `json:"-"` + Protocol Protocol // Traffic behaviour NoiseEnable bool @@ -75,63 +103,76 @@ type Peer struct { Encless bool MTU int - // Cryptography related - Key *[SSize]byte `json:"-"` - NonceCipher *xtea.Cipher `json:"-"` - nonceRecv uint64 - nonceLatest uint64 - nonceOur uint64 - NonceExpect uint64 `json:"-"` - nonceBucket0 map[uint64]struct{} - nonceBucket1 map[uint64]struct{} - nonceFound0 bool - nonceFound1 bool - nonceBucketN int32 + key *[SSize]byte // 
Timers - Timeout time.Duration `json:"-"` - Established time.Time - LastPing time.Time - LastSent time.Time - willSentCycle time.Time - - // Statistics - BytesIn int64 - BytesOut int64 - BytesPayloadIn int64 - BytesPayloadOut int64 - FramesIn int - FramesOut int - FramesUnauth int - FramesDup int - HeartbeatRecv int - HeartbeatSent int + Timeout time.Duration `json:"-"` + Established time.Time + LastPing time.Time // Receiver BusyR sync.Mutex `json:"-"` bufR []byte - tagR *[TagSize]byte + tagR *[tagSize]byte keyAuthR *[SSize]byte + nonceR *[16]byte pktSizeR int + // UDP-related + noncesR chan *[NonceSize]byte + nonceRecv [NonceSize]byte + nonceBucketL map[[NonceSize]byte]struct{} + nonceBucketM map[[NonceSize]byte]struct{} + nonceBucketH map[[NonceSize]byte]struct{} + + // TCP-related + NonceExpect []byte `json:"-"` + noncesExpect chan *[NonceSize]byte + // Transmitter BusyT sync.Mutex `json:"-"` bufT []byte - tagT *[TagSize]byte + tagT *[tagSize]byte keyAuthT *[SSize]byte + nonceT *[16]byte frameT []byte - now time.Time + noncesT chan *[NonceSize]byte +} + +// LogFields returns a logrus compatible Fields to identity a single +// peer in logs +func (p *Peer) LogFields() logrus.Fields { + return logrus.Fields{ + logPrefixPeer + "addr": p.Addr, + logPrefixPeer + "id": p.ID.String(), + logPrefixPeer + "established": p.Established.String(), + logPrefixPeer + "last_ping": p.LastPing.String(), + } +} + +// ConfigurationLogFields returns a logrus compatible Fields with the +// settings of a single peer. Complement LogFields() for extra debugging +// details. +func (p *Peer) ConfigurationLogFields() logrus.Fields { + return logrus.Fields{ + logPrefixPeer + "timeout": p.Timeout.String(), + logPrefixPeer + "protocol": p.Protocol.String(), + logPrefixPeer + "noise": p.NoiseEnable, + logPrefixPeer + "cpr": p.CPR, + logPrefixPeer + "mtu": p.MTU, + logPrefixPeer + "encless": p.Encless, + } } func (p *Peer) String() string { - return p.Id.String() + ":" + p.Addr + return p.ID.String() + ":" + p.Addr } // Zero peer's memory state. 
func (p *Peer) Zero() { p.BusyT.Lock() p.BusyR.Lock() - SliceZero(p.Key[:]) + SliceZero(p.key[:]) SliceZero(p.bufR) SliceZero(p.bufT) SliceZero(p.keyAuthR[:]) @@ -140,11 +181,6 @@ func (p *Peer) Zero() { p.BusyR.Unlock() } -func (p *Peer) NonceExpectation(buf []byte) { - binary.BigEndian.PutUint64(buf, p.NonceExpect) - p.NonceCipher.Encrypt(buf, buf) -} - func cprCycleCalculate(conf *PeerConf) time.Duration { if conf.CPR == 0 { return time.Duration(0) @@ -158,7 +194,7 @@ func cprCycleCalculate(conf *PeerConf) time.Duration { return time.Second / time.Duration(rate) } -func newPeer(isClient bool, addr string, conn io.Writer, conf *PeerConf, key *[SSize]byte) *Peer { +func newPeer(isClient bool, addr string, conn io.Writer, conf *PeerConf, key *[SSize]byte) (*Peer, error) { now := time.Now() timeout := conf.Timeout @@ -168,17 +204,18 @@ func newPeer(isClient bool, addr string, conn io.Writer, conf *PeerConf, key *[S noiseEnable = true timeout = cprCycle } else { - timeout = timeout / TimeoutHeartbeat + timeout = timeout / timeoutHeartbeat } - bufSize := S20BS + 2*conf.MTU + bufSize := CC20IBS + 2*conf.MTU if conf.Encless { bufSize += EnclessEnlargeSize noiseEnable = true } + peer := Peer{ Addr: addr, - Id: conf.Id, + ID: conf.ID, Conn: conn, NoiseEnable: noiseEnable, @@ -187,10 +224,7 @@ func newPeer(isClient bool, addr string, conn io.Writer, conf *PeerConf, key *[S Encless: conf.Encless, MTU: conf.MTU, - Key: key, - NonceCipher: newNonceCipher(key), - nonceBucket0: make(map[uint64]struct{}, NonceBucketSize), - nonceBucket1: make(map[uint64]struct{}, NonceBucketSize), + key: key, Timeout: timeout, Established: now, @@ -198,121 +232,152 @@ func newPeer(isClient bool, addr string, conn io.Writer, conf *PeerConf, key *[S bufR: make([]byte, bufSize), bufT: make([]byte, bufSize), - tagR: new([TagSize]byte), - tagT: new([TagSize]byte), + tagR: new([tagSize]byte), + tagT: new([tagSize]byte), keyAuthR: new([SSize]byte), + nonceR: new([16]byte), keyAuthT: new([SSize]byte), + nonceT: new([16]byte), } + + var err error if isClient { - peer.nonceOur = 1 - peer.NonceExpect = 0 + 2 + if peer.noncesT, err = newNonces(peer.key, 1+2); err != nil { + return nil, err + } + if peer.noncesR, err = newNonces(peer.key, 0+2); err != nil { + return nil, err + } + if peer.noncesExpect, err = newNonces(peer.key, 0+2); err != nil { + return nil, err + } } else { - peer.nonceOur = 0 - peer.NonceExpect = 1 + 2 + if peer.noncesT, err = newNonces(peer.key, 0+2); err != nil { + return nil, err + } + if peer.noncesR, err = newNonces(peer.key, 1+2); err != nil { + return nil, err + } + if peer.noncesExpect, err = newNonces(peer.key, 1+2); err != nil { + return nil, err + } + } + + peer.NonceExpect = make([]byte, NonceSize) + nonce := <-peer.noncesExpect + copy(peer.NonceExpect, nonce[:]) + + var i int + peer.nonceBucketL = make(map[[NonceSize]byte]struct{}, nonceBucketSize) + for i = 0; i < nonceBucketSize; i++ { + nonce = <-peer.noncesR + peer.nonceBucketL[*nonce] = struct{}{} + } + peer.nonceBucketM = make(map[[NonceSize]byte]struct{}, nonceBucketSize) + for i = 0; i < nonceBucketSize; i++ { + nonce = <-peer.noncesR + peer.nonceBucketM[*nonce] = struct{}{} + } + peer.nonceBucketH = make(map[[NonceSize]byte]struct{}, nonceBucketSize) + for i = 0; i < nonceBucketSize; i++ { + nonce = <-peer.noncesR + peer.nonceBucketH[*nonce] = struct{}{} } - return &peer + return &peer, nil } -// Process incoming Ethernet packet. +// EthProcess processes incoming Ethernet packet. 
// ready channel is TAPListen's synchronization channel used to tell him // that he is free to receive new packets. Encrypted and authenticated // packets will be sent to remote Peer side immediately. -func (p *Peer) EthProcess(data []byte) { - if len(data) > p.MTU-1 { // 1 is for padding byte - log.Println("Padded data packet size", len(data)+1, "is bigger than MTU", p.MTU, p) - return +func (p *Peer) EthProcess(data []byte) error { + const paddingSize = 1 + if len(data) > p.MTU-paddingSize { + logger.WithFields(p.LogFields()).WithFields( + p.ConfigurationLogFields(), + ).WithFields( + logrus.Fields{ + "func": logFuncPrefix + "Peer.EthProcess", + "padding": paddingSize, + "packet_size": len(data), + }).Warning("Ignore padded data packet larger than MTU") + return nil } - p.now = time.Now() p.BusyT.Lock() + defer p.BusyT.Unlock() // Zero size is a heartbeat packet SliceZero(p.bufT) if len(data) == 0 { - // If this heartbeat is necessary - if !p.LastSent.Add(p.Timeout).Before(p.now) { - p.BusyT.Unlock() - return - } - p.bufT[S20BS+0] = PadByte + p.bufT[CC20IBS+0] = padByte p.HeartbeatSent++ } else { // Copy payload to our internal buffer and we are ready to // accept the next one - copy(p.bufT[S20BS:], data) - p.bufT[S20BS+len(data)] = PadByte - p.BytesPayloadOut += int64(len(data)) + copy(p.bufT[CC20IBS:], data) + p.bufT[CC20IBS+len(data)] = padByte + p.BytesPayloadOut += uint64(len(data)) } if p.NoiseEnable && !p.Encless { - p.frameT = p.bufT[S20BS : S20BS+p.MTU-TagSize] + p.frameT = p.bufT[CC20IBS : CC20IBS+p.MTU-tagSize] } else if p.Encless { - p.frameT = p.bufT[S20BS : S20BS+p.MTU] + p.frameT = p.bufT[CC20IBS : CC20IBS+p.MTU] } else { - p.frameT = p.bufT[S20BS : S20BS+len(data)+1+NonceSize] + p.frameT = p.bufT[CC20IBS : CC20IBS+len(data)+1+NonceSize] } - p.nonceOur += 2 - binary.BigEndian.PutUint64(p.frameT[len(p.frameT)-NonceSize:], p.nonceOur) - p.NonceCipher.Encrypt( - p.frameT[len(p.frameT)-NonceSize:], - p.frameT[len(p.frameT)-NonceSize:], - ) + copy(p.frameT[len(p.frameT)-NonceSize:], (<-p.noncesT)[:]) var out []byte + copy(p.nonceT[8:], p.frameT[len(p.frameT)-NonceSize:]) if p.Encless { var err error - out, err = EnclessEncode( - p.Key, - p.frameT[len(p.frameT)-NonceSize:], - p.frameT[:len(p.frameT)-NonceSize], - ) + out, err = EnclessEncode(p.key, p.nonceT, p.frameT[:len(p.frameT)-NonceSize]) if err != nil { - panic(err) + return errors.Wrap(err, wrapEnclessEncode) } out = append(out, p.frameT[len(p.frameT)-NonceSize:]...) } else { - salsa20.XORKeyStream( - p.bufT[:S20BS+len(p.frameT)-NonceSize], - p.bufT[:S20BS+len(p.frameT)-NonceSize], - p.frameT[len(p.frameT)-NonceSize:], - p.Key, + chacha20.XORKeyStream( + p.bufT[:CC20IBS+len(p.frameT)-NonceSize], + p.bufT[:CC20IBS+len(p.frameT)-NonceSize], + p.nonceT, + p.key, ) copy(p.keyAuthT[:], p.bufT[:SSize]) poly1305.Sum(p.tagT, p.frameT, p.keyAuthT) - atomic.AddInt64(&p.BytesOut, int64(len(p.frameT)+TagSize)) + atomic.AddUint64(&p.BytesOut, uint64(len(p.frameT)+tagSize)) out = append(p.tagT[:], p.frameT...) 
} p.FramesOut++ - - if p.CPRCycle != time.Duration(0) { - p.willSentCycle = p.LastSent.Add(p.CPRCycle) - if p.willSentCycle.After(p.now) { - time.Sleep(p.willSentCycle.Sub(p.now)) - p.now = p.willSentCycle - } - } - - p.LastSent = p.now - p.Conn.Write(out) - p.BusyT.Unlock() + _, err := p.Conn.Write(out) + return errors.Wrap(err, "p.Conn.Write") } +// PktProcess processes data of a single packet func (p *Peer) PktProcess(data []byte, tap io.Writer, reorderable bool) bool { + fields := logrus.Fields{ + "func": logFuncPrefix + "Peer.PktProcess", + "reorderable": reorderable, + "data": len(data), + } if len(data) < MinPktLength { + logger.WithFields(p.LogFields()).WithFields(fields).WithField( + "minimum_packet_Length", MinPktLength, + ).Debug("Ignore packet smaller than allowed minimum") return false } - if !p.Encless && len(data) > len(p.bufR)-S20BS { + if !p.Encless && len(data) > len(p.bufR)-CC20IBS { return false } var out []byte - p.BusyR.Lock() + p.BusyR.Lock() // TODO use defer to unlock? + copy(p.nonceR[8:], data[len(data)-NonceSize:]) if p.Encless { var err error - out, err = EnclessDecode( - p.Key, - data[len(data)-NonceSize:], - data[:len(data)-NonceSize], - ) + out, err = EnclessDecode(p.key, p.nonceR, data[:len(data)-NonceSize]) if err != nil { + logger.WithFields(p.LogFields()).WithError(err).Debug("Failed to decode encless") p.FramesUnauth++ p.BusyR.Unlock() return false @@ -321,64 +386,68 @@ func (p *Peer) PktProcess(data []byte, tap io.Writer, reorderable bool) bool { for i := 0; i < SSize; i++ { p.bufR[i] = 0 } - copy(p.bufR[S20BS:], data[TagSize:]) - salsa20.XORKeyStream( - p.bufR[:S20BS+len(data)-TagSize-NonceSize], - p.bufR[:S20BS+len(data)-TagSize-NonceSize], - data[len(data)-NonceSize:], - p.Key, + copy(p.bufR[CC20IBS:], data[tagSize:]) + chacha20.XORKeyStream( + p.bufR[:CC20IBS+len(data)-tagSize-NonceSize], + p.bufR[:CC20IBS+len(data)-tagSize-NonceSize], + p.nonceR, + p.key, ) copy(p.keyAuthR[:], p.bufR[:SSize]) - copy(p.tagR[:], data[:TagSize]) - if !poly1305.Verify(p.tagR, data[TagSize:], p.keyAuthR) { + copy(p.tagR[:], data[:tagSize]) + if !poly1305.Verify(p.tagR, data[tagSize:], p.keyAuthR) { p.FramesUnauth++ p.BusyR.Unlock() return false } - out = p.bufR[S20BS : S20BS+len(data)-TagSize-NonceSize] + out = p.bufR[CC20IBS : CC20IBS+len(data)-tagSize-NonceSize] } - // Check if received nonce is known to us in either of two buckets. - // If yes, then this is ignored duplicate. - // Check from the oldest bucket, as in most cases this will result - // in constant time check. - // If Bucket0 is filled, then it becomes Bucket1. 
- p.NonceCipher.Decrypt( - data[len(data)-NonceSize:], - data[len(data)-NonceSize:], - ) - p.nonceRecv = binary.BigEndian.Uint64(data[len(data)-NonceSize:]) if reorderable { - _, p.nonceFound0 = p.nonceBucket0[p.nonceRecv] - _, p.nonceFound1 = p.nonceBucket1[p.nonceRecv] - if p.nonceFound0 || p.nonceFound1 || p.nonceRecv+2*NonceBucketSize < p.nonceLatest { + copy(p.nonceRecv[:], data[len(data)-NonceSize:]) + _, foundL := p.nonceBucketL[p.nonceRecv] + _, foundM := p.nonceBucketM[p.nonceRecv] + _, foundH := p.nonceBucketH[p.nonceRecv] + // If found is none of buckets: either it is too old, + // or too new (many packets were lost) + if !(foundL || foundM || foundH) { p.FramesDup++ p.BusyR.Unlock() return false } - p.nonceBucket0[p.nonceRecv] = struct{}{} - p.nonceBucketN++ - if p.nonceBucketN == NonceBucketSize { - p.nonceBucket1 = p.nonceBucket0 - p.nonceBucket0 = make(map[uint64]struct{}, NonceBucketSize) - p.nonceBucketN = 0 + // Delete seen nonce + if foundL { + delete(p.nonceBucketL, p.nonceRecv) + } + if foundM { + delete(p.nonceBucketM, p.nonceRecv) + } + if foundH { + delete(p.nonceBucketH, p.nonceRecv) + } + // If we are dealing with the latest bucket, create the new one + if foundH { + p.nonceBucketL, p.nonceBucketM = p.nonceBucketM, p.nonceBucketH + p.nonceBucketH = make(map[[NonceSize]byte]struct{}) + var nonce *[NonceSize]byte + for i := 0; i < nonceBucketSize; i++ { + nonce = <-p.noncesR + p.nonceBucketH[*nonce] = struct{}{} + } } } else { - if p.nonceRecv != p.NonceExpect { + if subtle.ConstantTimeCompare(data[len(data)-NonceSize:], p.NonceExpect) != 1 { p.FramesDup++ p.BusyR.Unlock() return false } - p.NonceExpect += 2 - } - if p.nonceRecv > p.nonceLatest { - p.nonceLatest = p.nonceRecv + copy(p.NonceExpect, (<-p.noncesExpect)[:]) } p.FramesIn++ - atomic.AddInt64(&p.BytesIn, int64(len(data))) + atomic.AddUint64(&p.BytesIn, uint64(len(data))) p.LastPing = time.Now() - p.pktSizeR = bytes.LastIndexByte(out, PadByte) + p.pktSizeR = bytes.LastIndexByte(out, padByte) if p.pktSizeR == -1 { p.BusyR.Unlock() return false @@ -396,8 +465,78 @@ func (p *Peer) PktProcess(data []byte, tap io.Writer, reorderable bool) bool { p.BusyR.Unlock() return true } - p.BytesPayloadIn += int64(p.pktSizeR) + p.BytesPayloadIn += uint64(p.pktSizeR) tap.Write(out[:p.pktSizeR]) p.BusyR.Unlock() return true } + +// PeerTapProcessor processes a TUN/TAP peer +func PeerTapProcessor(peer *Peer, tap *TAP, terminator chan struct{}) { + var data []byte + var now time.Time + var err error + fields := logrus.Fields{ + "func": logFuncPrefix + "PeerTapProcessor", + "tap": tap.Name, + } + lastSent := time.Now() + heartbeat := time.NewTicker(peer.Timeout) + if peer.CPRCycle == time.Duration(0) { + RawProcessor: + for { + select { + case <-terminator: + break RawProcessor + case <-heartbeat.C: + now = time.Now() + if lastSent.Add(peer.Timeout).Before(now) { + if err = peer.EthProcess(nil); err != nil { + logger.WithFields( + fields, + ).WithFields( + peer.LogFields(), + ).WithError(err).Warn( + "Can't process nil ethernet packet", + ) + } + lastSent = now + } + case data = <-tap.Sink: + if err = peer.EthProcess(data); err != nil { + logger.WithFields(fields).WithFields( + peer.LogFields(), + ).WithError(err).Warn("Can't process ethernet packet") + } + lastSent = time.Now() + } + } + } else { + CPRProcessor: + for { + data = nil + select { + case <-terminator: + break CPRProcessor + case data = <-tap.Sink: + if err = peer.EthProcess(data); err != nil { + logger.WithFields(fields).WithFields( + peer.LogFields(), + 
).WithError(err).Warn("Can't process ethernet packet") + } + default: + } + if data == nil { + if err = peer.EthProcess(nil); err != nil { + logger.WithFields(fields).WithFields( + peer.LogFields(), + ).WithError(err).Warn("Can't process nil ethernet packet") + } + } + time.Sleep(peer.CPRCycle) + } + } + close(terminator) + peer.Zero() + heartbeat.Stop() +}