From bb60f10e8d825d49e635b840b5eb7512811256d9 Mon Sep 17 00:00:00 2001
From: Sergey Matveev
Date: Sat, 25 Feb 2017 12:20:44 +0300
Subject: [PATCH] Revert lenData=len(data) assignment

lenData := len(data) creates a new variable each time, which is slow.
Slices and arrays already contain their length, and a len() call
compiles to quite optimal code.
---
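Note (not part of the commit): below is a minimal benchmark sketch of the
claim above, comparing repeated len() calls against caching the length in a
local variable. The file name, package clause and identifiers are
illustrative only and are not taken from this repository; adjust the package
clause to wherever the file is dropped. Run with: go test -bench=Len

// len_bench_test.go -- illustrative sketch, not part of the patch.
package govpn // assumption: placed next to the govpn sources; adjust as needed

import "testing"

// sink keeps results alive so the compiler cannot optimize the loops away.
var sink int

// BenchmarkLenCalls calls len() repeatedly, as the patched code does.
func BenchmarkLenCalls(b *testing.B) {
    data := make([]byte, 1500)
    for i := 0; i < b.N; i++ {
        if len(data) > 8 {
            sink = len(data) - 8 + len(data)%2
        }
    }
}

// BenchmarkLenCached caches len() in a local, as the reverted code did.
func BenchmarkLenCached(b *testing.B) {
    data := make([]byte, 1500)
    for i := 0; i < b.N; i++ {
        lenData := len(data)
        if lenData > 8 {
            sink = lenData - 8 + lenData%2
        }
    }
}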
 src/cypherpunks.ru/govpn/identity.go |  9 ++++---
 src/cypherpunks.ru/govpn/peer.go     | 36 +++++++++++++---------------
 2 files changed, 21 insertions(+), 24 deletions(-)

diff --git a/src/cypherpunks.ru/govpn/identity.go b/src/cypherpunks.ru/govpn/identity.go
index 52d430d..81066bc 100644
--- a/src/cypherpunks.ru/govpn/identity.go
+++ b/src/cypherpunks.ru/govpn/identity.go
@@ -141,15 +141,14 @@ func AddTimeSync(ts int, data []byte) {
 // as plaintext and last bytes as cyphertext.
 func (mc *MACCache) Find(data []byte) (*PeerID, error) {
     const minimumSize = 8 * 2
-    lenData := len(data)
     fields := logrus.Fields{
         "func": logFuncPrefix + "MACCache.Find",
-        "data": lenData,
+        "data": len(data),
         "size": mc.Length(),
     }
     logger.WithFields(fields).Debug("Starting")
-    if lenData < minimumSize {
-        return nil, errors.Errorf("MAC is too small %d, minimum %d", lenData, minimumSize)
+    if len(data) < minimumSize {
+        return nil, errors.Errorf("MAC is too small %d, minimum %d", len(data), minimumSize)
     }
     buf := make([]byte, 8)
     sum := make([]byte, 32)
@@ -171,7 +170,7 @@ func (mc *MACCache) Find(data []byte) (*PeerID, error) {
         mt.mac.Sum(sum[:0])
         mt.l.Unlock()

-        if subtle.ConstantTimeCompare(sum[len(sum)-8:], data[lenData-8:]) == 1 {
+        if subtle.ConstantTimeCompare(sum[len(sum)-8:], data[len(data)-8:]) == 1 {
             logger.WithFields(fields).WithFields(loopFields).Debug("Matching peer")
             ppid := PeerID(pid)
             return &ppid, nil
diff --git a/src/cypherpunks.ru/govpn/peer.go b/src/cypherpunks.ru/govpn/peer.go
index 3856208..b466161 100644
--- a/src/cypherpunks.ru/govpn/peer.go
+++ b/src/cypherpunks.ru/govpn/peer.go
@@ -293,15 +293,14 @@ func newPeer(isClient bool, addr string, conn io.Writer, conf *PeerConf, key *[S
 // packets will be sent to remote Peer side immediately.
 func (p *Peer) EthProcess(data []byte) error {
     const paddingSize = 1
-    lenData := len(data)
-    if lenData > p.MTU-paddingSize {
+    if len(data) > p.MTU-paddingSize {
         logger.WithFields(p.LogFields()).WithFields(
             p.ConfigurationLogFields(),
         ).WithFields(
             logrus.Fields{
                 "func":        logFuncPrefix + "Peer.EthProcess",
                 "padding":     paddingSize,
-                "packet_size": lenData,
+                "packet_size": len(data),
             }).Warning("Ignore padded data packet larger than MTU")
         return nil
     }
@@ -310,15 +309,15 @@ func (p *Peer) EthProcess(data []byte) error {

     // Zero size is a heartbeat packet
     SliceZero(p.bufT)
-    if lenData == 0 {
+    if len(data) == 0 {
         p.bufT[CC20IBS+0] = padByte
         p.HeartbeatSent++
     } else {
         // Copy payload to our internal buffer and we are ready to
         // accept the next one
         copy(p.bufT[CC20IBS:], data)
-        p.bufT[CC20IBS+lenData] = padByte
-        p.BytesPayloadOut += uint64(lenData)
+        p.bufT[CC20IBS+len(data)] = padByte
+        p.BytesPayloadOut += uint64(len(data))
     }

     if p.NoiseEnable && !p.Encless {
@@ -326,7 +325,7 @@ func (p *Peer) EthProcess(data []byte) error {
     } else if p.Encless {
         p.frameT = p.bufT[CC20IBS : CC20IBS+p.MTU]
     } else {
-        p.frameT = p.bufT[CC20IBS : CC20IBS+lenData+1+NonceSize]
+        p.frameT = p.bufT[CC20IBS : CC20IBS+len(data)+1+NonceSize]
     }
     copy(p.frameT[len(p.frameT)-NonceSize:], (<-p.noncesT)[:])
     var out []byte
@@ -357,27 +356,26 @@
 // PktProcess processes data of a single packet
 func (p *Peer) PktProcess(data []byte, tap io.Writer, reorderable bool) bool {
-    lenData := len(data)
     fields := logrus.Fields{
         "func":        logFuncPrefix + "Peer.PktProcess",
         "reorderable": reorderable,
-        "data":        lenData,
+        "data":        len(data),
     }
-    if lenData < MinPktLength {
+    if len(data) < MinPktLength {
         logger.WithFields(p.LogFields()).WithFields(fields).WithField(
             "minimum_packet_Length", MinPktLength,
         ).Debug("Ignore packet smaller than allowed minimum")
         return false
     }
-    if !p.Encless && lenData > len(p.bufR)-CC20IBS {
+    if !p.Encless && len(data) > len(p.bufR)-CC20IBS {
         return false
     }
     var out []byte
     p.BusyR.Lock()
     // TODO use defer to unlock?
-    copy(p.nonceR[8:], data[lenData-NonceSize:])
+    copy(p.nonceR[8:], data[len(data)-NonceSize:])
     if p.Encless {
         var err error
-        out, err = EnclessDecode(p.key, p.nonceR, data[:lenData-NonceSize])
+        out, err = EnclessDecode(p.key, p.nonceR, data[:len(data)-NonceSize])
         if err != nil {
             logger.WithFields(p.LogFields()).WithError(err).Debug("Failed to decode encless")
             p.FramesUnauth++
@@ -390,8 +388,8 @@ func (p *Peer) PktProcess(data []byte, tap io.Writer, reorderable bool) bool {
         }
         copy(p.bufR[CC20IBS:], data[tagSize:])
         chacha20.XORKeyStream(
-            p.bufR[:CC20IBS+lenData-tagSize-NonceSize],
-            p.bufR[:CC20IBS+lenData-tagSize-NonceSize],
+            p.bufR[:CC20IBS+len(data)-tagSize-NonceSize],
+            p.bufR[:CC20IBS+len(data)-tagSize-NonceSize],
             p.nonceR,
             p.key,
         )
@@ -402,11 +400,11 @@ func (p *Peer) PktProcess(data []byte, tap io.Writer, reorderable bool) bool {
             p.BusyR.Unlock()
             return false
         }
-        out = p.bufR[CC20IBS : CC20IBS+lenData-tagSize-NonceSize]
+        out = p.bufR[CC20IBS : CC20IBS+len(data)-tagSize-NonceSize]
     }

     if reorderable {
-        copy(p.nonceRecv[:], data[lenData-NonceSize:])
+        copy(p.nonceRecv[:], data[len(data)-NonceSize:])
         _, foundL := p.nonceBucketL[p.nonceRecv]
         _, foundM := p.nonceBucketM[p.nonceRecv]
         _, foundH := p.nonceBucketH[p.nonceRecv]
@@ -438,7 +436,7 @@ func (p *Peer) PktProcess(data []byte, tap io.Writer, reorderable bool) bool {
             }
         }
     } else {
-        if subtle.ConstantTimeCompare(data[lenData-NonceSize:], p.NonceExpect) != 1 {
+        if subtle.ConstantTimeCompare(data[len(data)-NonceSize:], p.NonceExpect) != 1 {
             p.FramesDup++
             p.BusyR.Unlock()
             return false
@@ -447,7 +445,7 @@ func (p *Peer) PktProcess(data []byte, tap io.Writer, reorderable bool) bool {
     }

     p.FramesIn++
-    atomic.AddUint64(&p.BytesIn, uint64(lenData))
+    atomic.AddUint64(&p.BytesIn, uint64(len(data)))
     p.LastPing = time.Now()
     p.pktSizeR = bytes.LastIndexByte(out, padByte)
     if p.pktSizeR == -1 {
-- 
2.44.0