X-Git-Url: http://www.git.cypherpunks.ru/?a=blobdiff_plain;f=src%2Fcypherpunks.ru%2Fgovpn%2Fpeer.go;h=b466161be989b38e0a7c1fce89211c87529e56dc;hb=bb60f10e8d825d49e635b840b5eb7512811256d9;hp=24f694557844216cc5610c87978b069dc0a75f27;hpb=cb347258e44c70b4b8339a7561ca1023b69c9d2c;p=govpn.git

diff --git a/src/cypherpunks.ru/govpn/peer.go b/src/cypherpunks.ru/govpn/peer.go
index 24f6945..b466161 100644
--- a/src/cypherpunks.ru/govpn/peer.go
+++ b/src/cypherpunks.ru/govpn/peer.go
@@ -36,10 +36,10 @@ import (
 
 const (
 	// NonceSize is nonce size
-	NonceSize                 = 8
-	nonceBucketSize           = 256
-	tagSize                   = poly1305.TagSize
-	chacha20InternalBlockSize = 64
+	NonceSize       = 8
+	nonceBucketSize = 256
+	tagSize         = poly1305.TagSize
+	CC20IBS         = 64
 	// MaxBytesPerKey is maximal amount of bytes transferred with single key (4 GiB)
 	MaxBytesPerKey uint64 = 1 << 32
 	// Heartbeat rate, relative to Timeout
@@ -207,7 +207,7 @@ func newPeer(isClient bool, addr string, conn io.Writer, conf *PeerConf, key *[S
 		timeout = timeout / timeoutHeartbeat
 	}
 
-	bufSize := chacha20InternalBlockSize + 2*conf.MTU
+	bufSize := CC20IBS + 2*conf.MTU
 	if conf.Encless {
 		bufSize += EnclessEnlargeSize
 		noiseEnable = true
@@ -293,13 +293,14 @@ func newPeer(isClient bool, addr string, conn io.Writer, conf *PeerConf, key *[S
 // packets will be sent to remote Peer side immediately.
 func (p *Peer) EthProcess(data []byte) error {
 	const paddingSize = 1
-	lenData := len(data)
-	if lenData > p.MTU-paddingSize {
-		logger.WithFields(p.LogFields()).WithFields(p.ConfigurationLogFields()).WithFields(
+	if len(data) > p.MTU-paddingSize {
+		logger.WithFields(p.LogFields()).WithFields(
+			p.ConfigurationLogFields(),
+		).WithFields(
 			logrus.Fields{
 				"func":        logFuncPrefix + "Peer.EthProcess",
 				"padding":     paddingSize,
-				"packet_size": lenData,
+				"packet_size": len(data),
 			}).Warning("Ignore padded data packet larger than MTU")
 		return nil
 	}
@@ -308,23 +309,23 @@ func (p *Peer) EthProcess(data []byte) error {
 
 	// Zero size is a heartbeat packet
 	SliceZero(p.bufT)
-	if lenData == 0 {
-		p.bufT[chacha20InternalBlockSize+0] = padByte
+	if len(data) == 0 {
+		p.bufT[CC20IBS+0] = padByte
 		p.HeartbeatSent++
 	} else {
 		// Copy payload to our internal buffer and we are ready to
 		// accept the next one
-		copy(p.bufT[chacha20InternalBlockSize:], data)
-		p.bufT[chacha20InternalBlockSize+lenData] = padByte
-		p.BytesPayloadOut += uint64(lenData)
+		copy(p.bufT[CC20IBS:], data)
+		p.bufT[CC20IBS+len(data)] = padByte
+		p.BytesPayloadOut += uint64(len(data))
 	}
 
 	if p.NoiseEnable && !p.Encless {
-		p.frameT = p.bufT[chacha20InternalBlockSize : chacha20InternalBlockSize+p.MTU-tagSize]
+		p.frameT = p.bufT[CC20IBS : CC20IBS+p.MTU-tagSize]
 	} else if p.Encless {
-		p.frameT = p.bufT[chacha20InternalBlockSize : chacha20InternalBlockSize+p.MTU]
+		p.frameT = p.bufT[CC20IBS : CC20IBS+p.MTU]
 	} else {
-		p.frameT = p.bufT[chacha20InternalBlockSize : chacha20InternalBlockSize+lenData+1+NonceSize]
+		p.frameT = p.bufT[CC20IBS : CC20IBS+len(data)+1+NonceSize]
 	}
 	copy(p.frameT[len(p.frameT)-NonceSize:], (<-p.noncesT)[:])
 	var out []byte
@@ -338,8 +339,8 @@ func (p *Peer) EthProcess(data []byte) error {
 		out = append(out, p.frameT[len(p.frameT)-NonceSize:]...)
 	} else {
 		chacha20.XORKeyStream(
-			p.bufT[:chacha20InternalBlockSize+len(p.frameT)-NonceSize],
-			p.bufT[:chacha20InternalBlockSize+len(p.frameT)-NonceSize],
+			p.bufT[:CC20IBS+len(p.frameT)-NonceSize],
+			p.bufT[:CC20IBS+len(p.frameT)-NonceSize],
 			p.nonceT,
 			p.key,
 		)
@@ -355,25 +356,26 @@ func (p *Peer) EthProcess(data []byte) error {
 
 // PktProcess processes data of a single packet
 func (p *Peer) PktProcess(data []byte, tap io.Writer, reorderable bool) bool {
-	lenData := len(data)
 	fields := logrus.Fields{
 		"func":        logFuncPrefix + "Peer.PktProcess",
 		"reorderable": reorderable,
-		"data":        lenData,
+		"data":        len(data),
 	}
-	if lenData < MinPktLength {
-		logger.WithFields(p.LogFields()).WithFields(fields).WithField("minimum_packet_Length", MinPktLength).Debug("Ignore packet smaller than allowed minimum")
+	if len(data) < MinPktLength {
+		logger.WithFields(p.LogFields()).WithFields(fields).WithField(
+			"minimum_packet_Length", MinPktLength,
+		).Debug("Ignore packet smaller than allowed minimum")
 		return false
 	}
-	if !p.Encless && lenData > len(p.bufR)-chacha20InternalBlockSize {
+	if !p.Encless && len(data) > len(p.bufR)-CC20IBS {
 		return false
 	}
 	var out []byte
 	p.BusyR.Lock() // TODO use defer to unlock?
-	copy(p.nonceR[8:], data[lenData-NonceSize:])
+	copy(p.nonceR[8:], data[len(data)-NonceSize:])
 	if p.Encless {
 		var err error
-		out, err = EnclessDecode(p.key, p.nonceR, data[:lenData-NonceSize])
+		out, err = EnclessDecode(p.key, p.nonceR, data[:len(data)-NonceSize])
 		if err != nil {
 			logger.WithFields(p.LogFields()).WithError(err).Debug("Failed to decode encless")
 			p.FramesUnauth++
@@ -384,10 +386,10 @@ func (p *Peer) PktProcess(data []byte, tap io.Writer, reorderable bool) bool {
 		for i := 0; i < SSize; i++ {
 			p.bufR[i] = 0
 		}
-		copy(p.bufR[chacha20InternalBlockSize:], data[tagSize:])
+		copy(p.bufR[CC20IBS:], data[tagSize:])
 		chacha20.XORKeyStream(
-			p.bufR[:chacha20InternalBlockSize+lenData-tagSize-NonceSize],
-			p.bufR[:chacha20InternalBlockSize+lenData-tagSize-NonceSize],
+			p.bufR[:CC20IBS+len(data)-tagSize-NonceSize],
+			p.bufR[:CC20IBS+len(data)-tagSize-NonceSize],
 			p.nonceR,
 			p.key,
 		)
@@ -398,11 +400,11 @@ func (p *Peer) PktProcess(data []byte, tap io.Writer, reorderable bool) bool {
 			p.BusyR.Unlock()
 			return false
 		}
-		out = p.bufR[chacha20InternalBlockSize : chacha20InternalBlockSize+lenData-tagSize-NonceSize]
+		out = p.bufR[CC20IBS : CC20IBS+len(data)-tagSize-NonceSize]
 	}
 
 	if reorderable {
-		copy(p.nonceRecv[:], data[lenData-NonceSize:])
+		copy(p.nonceRecv[:], data[len(data)-NonceSize:])
 		_, foundL := p.nonceBucketL[p.nonceRecv]
 		_, foundM := p.nonceBucketM[p.nonceRecv]
 		_, foundH := p.nonceBucketH[p.nonceRecv]
@@ -434,7 +436,7 @@ func (p *Peer) PktProcess(data []byte, tap io.Writer, reorderable bool) bool {
 			}
 		}
 	} else {
-		if subtle.ConstantTimeCompare(data[lenData-NonceSize:], p.NonceExpect) != 1 {
+		if subtle.ConstantTimeCompare(data[len(data)-NonceSize:], p.NonceExpect) != 1 {
 			p.FramesDup++
 			p.BusyR.Unlock()
 			return false
@@ -443,7 +445,7 @@ func (p *Peer) PktProcess(data []byte, tap io.Writer, reorderable bool) bool {
 	}
 
 	p.FramesIn++
-	atomic.AddUint64(&p.BytesIn, uint64(lenData))
+	atomic.AddUint64(&p.BytesIn, uint64(len(data)))
 	p.LastPing = time.Now()
 	p.pktSizeR = bytes.LastIndexByte(out, padByte)
 	if p.pktSizeR == -1 {
@@ -490,13 +492,21 @@ func PeerTapProcessor(peer *Peer, tap *TAP, terminator chan struct{}) {
 			now = time.Now()
 			if lastSent.Add(peer.Timeout).Before(now) {
 				if err = peer.EthProcess(nil); err != nil {
-					logger.WithFields(fields).WithFields(peer.LogFields()).WithError(err).Warn("Can't process nil ethernet packet")
ethernet packet") + logger.WithFields( + fields, + ).WithFields( + peer.LogFields(), + ).WithError(err).Warn( + "Can't process nil ethernet packet", + ) } lastSent = now } case data = <-tap.Sink: if err = peer.EthProcess(data); err != nil { - logger.WithFields(fields).WithFields(peer.LogFields()).WithError(err).Warn("Can't process ethernet packet") + logger.WithFields(fields).WithFields( + peer.LogFields(), + ).WithError(err).Warn("Can't process ethernet packet") } lastSent = time.Now() } @@ -510,13 +520,17 @@ func PeerTapProcessor(peer *Peer, tap *TAP, terminator chan struct{}) { break CPRProcessor case data = <-tap.Sink: if err = peer.EthProcess(data); err != nil { - logger.WithFields(fields).WithFields(peer.LogFields()).WithError(err).Warn("Can't process ethernet packet") + logger.WithFields(fields).WithFields( + peer.LogFields(), + ).WithError(err).Warn("Can't process ethernet packet") } default: } if data == nil { if err = peer.EthProcess(nil); err != nil { - logger.WithFields(fields).WithFields(peer.LogFields()).WithError(err).Warn("Can't process nil ethernet packet") + logger.WithFields(fields).WithFields( + peer.LogFields(), + ).WithError(err).Warn("Can't process nil ethernet packet") } } time.Sleep(peer.CPRCycle)