Reuse compression writers

parent 4625bdf5cb
commit f16d14cfa6

2 changed files with 45 additions and 34 deletions
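The idea behind the change: instead of constructing a new compression writer for every request, the handler keeps writers in a sync.Pool and Resets each one onto the next response. As context only (this is not Traefik's handler code; gzipPool and compress below are illustrative names), a minimal self-contained sketch of that pattern using the standard library gzip writer:

```go
package main

import (
	"compress/gzip"
	"fmt"
	"io"
	"os"
	"sync"
)

// gzipPool hands out reusable gzip writers instead of allocating one per use.
var gzipPool = sync.Pool{
	New: func() any { return gzip.NewWriter(nil) },
}

// compress writes the gzip-compressed payload to dst using a pooled writer.
func compress(dst io.Writer, payload []byte) error {
	zw := gzipPool.Get().(*gzip.Writer)
	defer gzipPool.Put(zw)

	zw.Reset(dst) // retarget the pooled encoder at the new destination
	if _, err := zw.Write(payload); err != nil {
		return err
	}

	// Close flushes buffered data and writes the gzip trailer; the writer
	// stays reusable because the next Reset reinitializes its state.
	return zw.Close()
}

func main() {
	if err := compress(os.Stdout, []byte("hello, pooled writer")); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```

The commit applies the same Get/Reset/Put shape inside the compress middleware: the first file below updates the benchmarks, the second wires the pool into ServeHTTP.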
@@ -670,39 +670,32 @@ func Test1xxResponses(t *testing.T) {
 	assert.NotEqualValues(t, body, fakeBody)
 }

-func BenchmarkCompress(b *testing.B) {
+func BenchmarkCompressGzip(b *testing.B) {
+	runCompressionBenchmark(b, gzipName)
+}
+
+func BenchmarkCompressBrotli(b *testing.B) {
+	runCompressionBenchmark(b, brotliName)
+}
+
+func BenchmarkCompressZstandard(b *testing.B) {
+	runCompressionBenchmark(b, zstdName)
+}
+
+func runCompressionBenchmark(b *testing.B, algorithm string) {
+	b.Helper()
+
 	testCases := []struct {
 		name     string
 		parallel bool
 		size     int
 	}{
-		{
-			name: "2k",
-			size: 2048,
-		},
-		{
-			name: "20k",
-			size: 20480,
-		},
-		{
-			name: "100k",
-			size: 102400,
-		},
-		{
-			name:     "2k parallel",
-			parallel: true,
-			size:     2048,
-		},
-		{
-			name:     "20k parallel",
-			parallel: true,
-			size:     20480,
-		},
-		{
-			name:     "100k parallel",
-			parallel: true,
-			size:     102400,
-		},
+		{"2k", false, 2048},
+		{"20k", false, 20480},
+		{"100k", false, 102400},
+		{"2k parallel", true, 2048},
+		{"20k parallel", true, 20480},
+		{"100k parallel", true, 102400},
 	}

 	for _, test := range testCases {
@@ -716,7 +709,7 @@ func BenchmarkCompress(b *testing.B) {
 			handler, _ := New(context.Background(), next, dynamic.Compress{}, "testing")

 			req, _ := http.NewRequest(http.MethodGet, "/whatever", nil)
-			req.Header.Set("Accept-Encoding", "gzip")
+			req.Header.Set("Accept-Encoding", algorithm)

 			b.ReportAllocs()
 			b.SetBytes(int64(test.size))
@@ -724,7 +717,7 @@ func BenchmarkCompress(b *testing.B) {
 				b.ResetTimer()
 				b.RunParallel(func(pb *testing.PB) {
 					for pb.Next() {
-						runBenchmark(b, req, handler)
+						runBenchmark(b, req, handler, algorithm)
 					}
 				})
 				return
@@ -732,13 +725,13 @@ func BenchmarkCompress(b *testing.B) {

 			b.ResetTimer()
 			for range b.N {
-				runBenchmark(b, req, handler)
+				runBenchmark(b, req, handler, algorithm)
 			}
 		})
 	}
 }

-func runBenchmark(b *testing.B, req *http.Request, handler http.Handler) {
+func runBenchmark(b *testing.B, req *http.Request, handler http.Handler, algorithm string) {
 	b.Helper()

 	res := httptest.NewRecorder()
@@ -747,7 +740,7 @@ func runBenchmark(b *testing.B, req *http.Request, handler http.Handler) {
 		b.Fatalf("Expected 200 but got %d", code)
 	}

-	assert.Equal(b, gzipName, res.Header().Get(contentEncodingHeader))
+	assert.Equal(b, algorithm, res.Header().Get(contentEncodingHeader))
 }

 func generateBytes(length int) []byte {
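With the single BenchmarkCompress split into BenchmarkCompressGzip, BenchmarkCompressBrotli, and BenchmarkCompressZstandard, each algorithm can be measured on its own with the standard tooling, e.g. `go test -run '^$' -bench BenchmarkCompressBrotli -benchmem` from the middleware's package directory (file paths are not shown in this view). The second file in the commit is where the actual writer reuse happens: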
@@ -8,6 +8,7 @@ import (
 	"mime"
 	"net"
 	"net/http"
+	"sync"

 	"github.com/andybalholm/brotli"
 	"github.com/klauspost/compress/zstd"
@@ -45,6 +46,7 @@ type CompressionHandler struct {
 	excludedContentTypes []parsedContentType
 	includedContentTypes []parsedContentType
 	next                 http.Handler
+	writerPool           sync.Pool
 }

 // NewCompressionHandler returns a new compressing handler.
@@ -92,7 +94,7 @@ func NewCompressionHandler(cfg Config, next http.Handler) (http.Handler, error)
 func (c *CompressionHandler) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
 	rw.Header().Add(vary, acceptEncoding)

-	compressionWriter, err := newCompressionWriter(c.cfg.Algorithm, rw)
+	compressionWriter, err := c.getCompressionWriter(rw)
 	if err != nil {
 		logger := middlewares.GetLogger(r.Context(), c.cfg.MiddlewareName, typeName)
 		logger.Debug().Msgf("Create compression handler: %v", err)
@@ -100,6 +102,7 @@ func (c *CompressionHandler) ServeHTTP(rw http.ResponseWriter, r *http.Request)
 		rw.WriteHeader(http.StatusInternalServerError)
 		return
 	}
+	defer c.putCompressionWriter(compressionWriter)

 	responseWriter := &responseWriter{
 		rw: rw,
@@ -130,6 +133,8 @@ type compression interface {
 	// as it would otherwise send some extra "end of compression" bytes.
 	// Close also makes sure to flush whatever was left to write from the buffer.
 	Close() error
+	// Reset reinitializes the state of the encoder, allowing it to be reused.
+	Reset(w io.Writer)
 }

 type compressionWriter struct {
@@ -137,6 +142,19 @@ type compressionWriter struct {
 	alg string
 }

+func (c *CompressionHandler) getCompressionWriter(rw io.Writer) (*compressionWriter, error) {
+	if writer, ok := c.writerPool.Get().(*compressionWriter); ok {
+		writer.compression.Reset(rw)
+		return writer, nil
+	}
+
+	return newCompressionWriter(c.cfg.Algorithm, rw)
+}
+
+func (c *CompressionHandler) putCompressionWriter(writer *compressionWriter) {
+	writer.Reset(nil)
+	c.writerPool.Put(writer)
+}
+
 func newCompressionWriter(algo string, in io.Writer) (*compressionWriter, error) {
 	switch algo {
 	case brotliName:
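One detail worth noting on the put path: writer.Reset(nil) retargets the pooled encoder away from the response writer it just served, presumably so a writer sitting in the pool does not keep a reference to request-scoped objects; the next getCompressionWriter call Resets it onto the new destination before it is used again.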