rpc: use sync.Pools for snappy readers and writers
Creating a snappy.{Reader,Writer} on every compression/decompression is
fairly expensive. This reduced the performance hit for enabling
compression in a local 3-node cluster from 38% to 3%.

See #14721
petermattis committed May 10, 2017
1 parent fdc31f8 commit 0c33cee
Showing 1 changed file with 27 additions and 5 deletions.
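
As context for the commit message's claim that constructing a snappy.{Reader,Writer} per call is fairly expensive, here is a rough, hypothetical micro-benchmark sketch (not part of the commit): it compares building a fresh snappy.Writer on every call against reusing one through a sync.Pool. The benchmark names and the 4 KB payload are illustrative assumptions; running it with go test -bench=SnappyWriter -benchmem should show the per-call variant allocating far more per operation.

// Hypothetical benchmark sketch; assumes only golang/snappy and the standard library.
package rpc_test

import (
	"io/ioutil"
	"sync"
	"testing"

	"github.com/golang/snappy"
)

var payload = make([]byte, 4096) // illustrative payload size

// BenchmarkSnappyWriterPerCall builds a new buffered writer (and its large
// internal buffers) on every iteration, mirroring the code before this commit.
func BenchmarkSnappyWriterPerCall(b *testing.B) {
	for i := 0; i < b.N; i++ {
		z := snappy.NewBufferedWriter(ioutil.Discard)
		_, _ = z.Write(payload)
		_ = z.Close()
	}
}

var writerPool = sync.Pool{
	New: func() interface{} { return snappy.NewBufferedWriter(nil) },
}

// BenchmarkSnappyWriterPooled reuses writers via sync.Pool, mirroring the
// approach the commit adopts (the commit type-asserts Get's result instead of
// supplying a New func, but the effect is the same).
func BenchmarkSnappyWriterPooled(b *testing.B) {
	for i := 0; i < b.N; i++ {
		z := writerPool.Get().(*snappy.Writer)
		z.Reset(ioutil.Discard) // attach the destination; internal buffers are reused
		_, _ = z.Write(payload)
		_ = z.Flush()
		writerPool.Put(z)
	}
}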
pkg/rpc/snappy.go (32 changes: 27 additions & 5 deletions)
@@ -17,19 +17,33 @@ package rpc
 import (
 	"io"
 	"io/ioutil"
+	"sync"
 
 	"github.com/golang/snappy"
 )
 
+// NB: The grpc.{Compressor,Decompressor} implementations need to be goroutine
+// safe as multiple goroutines may be using the same compressor/decompressor
+// for different streams on the same connection.
+var snappyWriterPool sync.Pool
+var snappyReaderPool sync.Pool
+
 type snappyCompressor struct {
 }
 
 func (snappyCompressor) Do(w io.Writer, p []byte) error {
-	z := snappy.NewBufferedWriter(w)
-	if _, err := z.Write(p); err != nil {
-		return err
+	z, ok := snappyWriterPool.Get().(*snappy.Writer)
+	if !ok {
+		z = snappy.NewBufferedWriter(w)
+	} else {
+		z.Reset(w)
 	}
+	_, err := z.Write(p)
+	if err == nil {
+		err = z.Flush()
+	}
-	return z.Close()
+	snappyWriterPool.Put(z)
+	return err
 }
 
 func (snappyCompressor) Type() string {
@@ -40,7 +54,15 @@ type snappyDecompressor struct {
 }
 
 func (snappyDecompressor) Do(r io.Reader) ([]byte, error) {
-	return ioutil.ReadAll(snappy.NewReader(r))
+	z, ok := snappyReaderPool.Get().(*snappy.Reader)
+	if !ok {
+		z = snappy.NewReader(r)
+	} else {
+		z.Reset(r)
+	}
+	b, err := ioutil.ReadAll(z)
+	snappyReaderPool.Put(z)
+	return b, err
 }
 
 func (snappyDecompressor) Type() string {
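
An editorial note on the pattern in the diff above, not part of the commit: each Do call takes a writer or reader out of the pool, uses it, and only puts it back once it is finished, so concurrent calls never touch the same object; that is what keeps the shared grpc compressor/decompressor goroutine-safe, as the NB comment requires. The same reuse can also be written with sync.Pool's New field, which removes the ok check after Get. The sketch below uses illustrative names and is not how the commit is written.

package rpc // illustrative placement

import (
	"io"
	"sync"

	"github.com/golang/snappy"
)

var pooledWriters = sync.Pool{
	New: func() interface{} {
		// A nil destination is fine here: Reset attaches the real io.Writer
		// before the writer is used.
		return snappy.NewBufferedWriter(nil)
	},
}

// compressWith is a hypothetical helper showing the same reuse discipline as
// the commit's snappyCompressor.Do: Get, Reset, Write, Flush, Put.
func compressWith(w io.Writer, p []byte) error {
	z := pooledWriters.Get().(*snappy.Writer)
	z.Reset(w)
	_, err := z.Write(p)
	if err == nil {
		err = z.Flush() // Flush, as in the commit; Reset prepares the writer for its next use
	}
	pooledWriters.Put(z)
	return err
}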
