Reuse compression writers

Michel Heusschen 2024-10-09 14:14:03 +02:00 committed by GitHub
parent 4625bdf5cb
commit f16d14cfa6
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
2 changed files with 45 additions and 34 deletions
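
The change keeps finished compression writers in a sync.Pool on the handler and Resets them onto the next response instead of allocating a new encoder per request. Below is a minimal, self-contained sketch of the same pattern using the standard library gzip encoder and illustrative names, not the middleware's own types:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"sync"
)

// writerPool holds finished encoders so later responses can reuse them.
var writerPool sync.Pool

func getWriter(dst io.Writer) *gzip.Writer {
	if w, ok := writerPool.Get().(*gzip.Writer); ok {
		w.Reset(dst) // reuse a pooled encoder on the new destination
		return w
	}
	return gzip.NewWriter(dst) // pool empty: allocate a fresh encoder
}

func putWriter(w *gzip.Writer) {
	w.Reset(io.Discard) // detach from the previous destination before pooling
	writerPool.Put(w)
}

func main() {
	for i := 0; i < 3; i++ {
		var buf bytes.Buffer
		w := getWriter(&buf) // after the first iteration this reuses the same encoder
		fmt.Fprintf(w, "response %d", i)
		w.Close() // flush and finish the compressed stream
		putWriter(w)
		fmt.Println(i, buf.Len(), "compressed bytes")
	}
}

The commit resets the pooled writer onto nil rather than io.Discard, but the effect is the same: the writer put back into the pool no longer holds a reference to the previous ResponseWriter. The first file changed is the benchmark; the second is the handler itself.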


@@ -670,39 +670,32 @@ func Test1xxResponses(t *testing.T) {
 	assert.NotEqualValues(t, body, fakeBody)
 }
 
-func BenchmarkCompress(b *testing.B) {
+func BenchmarkCompressGzip(b *testing.B) {
+	runCompressionBenchmark(b, gzipName)
+}
+
+func BenchmarkCompressBrotli(b *testing.B) {
+	runCompressionBenchmark(b, brotliName)
+}
+
+func BenchmarkCompressZstandard(b *testing.B) {
+	runCompressionBenchmark(b, zstdName)
+}
+
+func runCompressionBenchmark(b *testing.B, algorithm string) {
+	b.Helper()
+
 	testCases := []struct {
 		name     string
 		parallel bool
 		size     int
 	}{
-		{
-			name: "2k",
-			size: 2048,
-		},
-		{
-			name: "20k",
-			size: 20480,
-		},
-		{
-			name: "100k",
-			size: 102400,
-		},
-		{
-			name:     "2k parallel",
-			parallel: true,
-			size:     2048,
-		},
-		{
-			name:     "20k parallel",
-			parallel: true,
-			size:     20480,
-		},
-		{
-			name:     "100k parallel",
-			parallel: true,
-			size:     102400,
-		},
+		{"2k", false, 2048},
+		{"20k", false, 20480},
+		{"100k", false, 102400},
+		{"2k parallel", true, 2048},
+		{"20k parallel", true, 20480},
+		{"100k parallel", true, 102400},
 	}
 
 	for _, test := range testCases {
@@ -716,7 +709,7 @@ func BenchmarkCompress(b *testing.B) {
 			handler, _ := New(context.Background(), next, dynamic.Compress{}, "testing")
 
 			req, _ := http.NewRequest(http.MethodGet, "/whatever", nil)
-			req.Header.Set("Accept-Encoding", "gzip")
+			req.Header.Set("Accept-Encoding", algorithm)
 
 			b.ReportAllocs()
 			b.SetBytes(int64(test.size))
@@ -724,7 +717,7 @@ func BenchmarkCompress(b *testing.B) {
 				b.ResetTimer()
 				b.RunParallel(func(pb *testing.PB) {
 					for pb.Next() {
-						runBenchmark(b, req, handler)
+						runBenchmark(b, req, handler, algorithm)
 					}
 				})
 				return
@@ -732,13 +725,13 @@ func BenchmarkCompress(b *testing.B) {
 			b.ResetTimer()
 			for range b.N {
-				runBenchmark(b, req, handler)
+				runBenchmark(b, req, handler, algorithm)
 			}
 		})
 	}
 }
 
-func runBenchmark(b *testing.B, req *http.Request, handler http.Handler) {
+func runBenchmark(b *testing.B, req *http.Request, handler http.Handler, algorithm string) {
 	b.Helper()
 
 	res := httptest.NewRecorder()
@@ -747,7 +740,7 @@ func runBenchmark(b *testing.B, req *http.Request, handler http.Handler) {
 		b.Fatalf("Expected 200 but got %d", code)
 	}
 
-	assert.Equal(b, gzipName, res.Header().Get(contentEncodingHeader))
+	assert.Equal(b, algorithm, res.Header().Get(contentEncodingHeader))
 }
 
 func generateBytes(length int) []byte {
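
With the benchmark split per algorithm, each encoder can be measured on its own. A typical invocation from the package directory (standard go test flags, not taken from this page) would be:

	go test -run='^$' -bench=BenchmarkCompress -benchmem .

The second file in the commit is the compression handler itself, where the writer pool is introduced.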


@@ -8,6 +8,7 @@ import (
 	"mime"
 	"net"
 	"net/http"
+	"sync"
 
 	"github.com/andybalholm/brotli"
 	"github.com/klauspost/compress/zstd"
@@ -45,6 +46,7 @@ type CompressionHandler struct {
 	excludedContentTypes []parsedContentType
 	includedContentTypes []parsedContentType
 	next                 http.Handler
+	writerPool           sync.Pool
 }
 
 // NewCompressionHandler returns a new compressing handler.
@@ -92,7 +94,7 @@ func NewCompressionHandler(cfg Config, next http.Handler) (http.Handler, error)
 func (c *CompressionHandler) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
 	rw.Header().Add(vary, acceptEncoding)
 
-	compressionWriter, err := newCompressionWriter(c.cfg.Algorithm, rw)
+	compressionWriter, err := c.getCompressionWriter(rw)
 	if err != nil {
 		logger := middlewares.GetLogger(r.Context(), c.cfg.MiddlewareName, typeName)
 		logger.Debug().Msgf("Create compression handler: %v", err)
@@ -100,6 +102,7 @@ func (c *CompressionHandler) ServeHTTP(rw http.ResponseWriter, r *http.Request)
 		rw.WriteHeader(http.StatusInternalServerError)
 		return
 	}
+	defer c.putCompressionWriter(compressionWriter)
 
 	responseWriter := &responseWriter{
 		rw: rw,
@@ -130,6 +133,8 @@ type compression interface {
 	// as it would otherwise send some extra "end of compression" bytes.
 	// Close also makes sure to flush whatever was left to write from the buffer.
 	Close() error
+	// Reset reinitializes the state of the encoder, allowing it to be reused.
+	Reset(w io.Writer)
 }
 
 type compressionWriter struct {
@@ -137,6 +142,19 @@ type compressionWriter struct {
 	alg string
 }
 
+func (c *CompressionHandler) getCompressionWriter(rw io.Writer) (*compressionWriter, error) {
+	if writer, ok := c.writerPool.Get().(*compressionWriter); ok {
+		writer.compression.Reset(rw)
+		return writer, nil
+	}
+
+	return newCompressionWriter(c.cfg.Algorithm, rw)
+}
+
+func (c *CompressionHandler) putCompressionWriter(writer *compressionWriter) {
+	writer.Reset(nil)
+	c.writerPool.Put(writer)
+}
+
 func newCompressionWriter(algo string, in io.Writer) (*compressionWriter, error) {
 	switch algo {
 	case brotliName:
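
Adding Reset(w io.Writer) to the unexported compression interface works without adapter code because the encoders the middleware wraps already expose exactly that method. A small compile-time check of that assumption (the standard library gzip stands in here for whichever gzip implementation the middleware actually uses; the interface name is illustrative):

package main

import (
	"compress/gzip"
	"io"

	"github.com/andybalholm/brotli"
	"github.com/klauspost/compress/zstd"
)

// resettableWriteCloser mirrors the method set the handler's compression
// interface now requires: Write, Close, and Reset.
type resettableWriteCloser interface {
	io.WriteCloser
	Reset(w io.Writer)
}

// Compile-time assertions that each encoder satisfies the interface.
var (
	_ resettableWriteCloser = (*gzip.Writer)(nil)
	_ resettableWriteCloser = (*brotli.Writer)(nil)
	_ resettableWriteCloser = (*zstd.Encoder)(nil)
)

func main() {}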