
Reduce per-connection memory overhead

- Use sync.Pool for relay buffers instead of stack-allocated arrays.
  A [16379]byte array on the goroutine stack forces Go to grow the
  stack to 32 KB (the next power of two); pooled buffers keep
  goroutine stacks small. See the pooling sketch after this list.

- Same fix for doppelganger write buffer ([16384]byte in conn.start).

- Replace idle goroutines with context.AfterFunc (Go 1.21+) in
  proxy.ServeConn and relay.Relay. These goroutines existed only to
  wait on ctx.Done() and close connections. AfterFunc achieves the
  same without spawning a goroutine until the context is actually
  cancelled; see the AfterFunc sketch below.
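
For reference, a minimal standalone sketch of the pooling pattern both
buffer fixes use (not part of the patch; maxRecordSize here stands in
for tls.MaxRecordSize). The pool deliberately stores *[]byte rather
than []byte: putting a bare slice into a sync.Pool boxes its header
into an interface value and allocates on every Put (staticcheck
SA6002), while a pointer is boxed once in New.

    package main

    import (
        "fmt"
        "sync"
    )

    const maxRecordSize = 16384 // stand-in for tls.MaxRecordSize

    var bufPool = sync.Pool{
        New: func() any {
            b := make([]byte, maxRecordSize)
            return &b // pointer: Get/Put never re-box the slice header
        },
    }

    func main() {
        bp := bufPool.Get().(*[]byte)
        defer bufPool.Put(bp)

        buf := *bp // 16 KB heap buffer, reused across calls
        fmt.Println("buffer of", len(buf), "bytes; goroutine stack stays small")
    }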
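Likewise, a runnable sketch of the AfterFunc swap in isolation (again
not from the patch; the fmt.Println stands in for the conn.Close()
calls the real code makes):

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    func main() {
        ctx, cancel := context.WithCancel(context.Background())

        // Before: go func() { <-ctx.Done(); conn.Close() }() parks one
        // goroutine per connection for the stream's whole lifetime.
        // After: a registered callback whose goroutine is created only
        // if and when the context is cancelled.
        stop := context.AfterFunc(ctx, func() {
            fmt.Println("ctx cancelled: closing connections")
        })
        defer stop() // normal exit path: deregister without ever running it

        cancel()                          // simulate stream teardown
        time.Sleep(10 * time.Millisecond) // give the callback time to fire
    }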

Net effect: at 3000 concurrent connections on a 1-vCPU/961 MB VPS,
the unmodified binary drops 246 connections and throughput falls to
10 MB/s. With these changes: zero failures, 63 MB/s, and 31% lower RSS.

Closes #412
Alexey Dolotov, 1 month ago
commit 026ec74dfd
3 changed files with 28 additions and 10 deletions:
1. mtglib/internal/doppel/conn.go (+11, -2)
2. mtglib/internal/relay/relay.go (+14, -5)
3. mtglib/proxy.go (+3, -3)

mtglib/internal/doppel/conn.go (+11, -2)

@@ -9,6 +9,13 @@ import (
 	"github.com/9seconds/mtg/v2/mtglib/internal/tls"
 )
 
+var doppelBufPool = sync.Pool{
+	New: func() any {
+		b := make([]byte, tls.MaxRecordSize)
+		return &b
+	},
+}
+
 type Conn struct {
 	essentials.Conn
 
@@ -46,7 +53,9 @@ func (c Conn) Start() {
 }
 
 func (c Conn) start() {
-	buf := [tls.MaxRecordSize]byte{}
+	bp := doppelBufPool.Get().(*[]byte)
+	buf := *bp
+	defer doppelBufPool.Put(bp)
 
 	for {
 		select {
@@ -68,7 +77,7 @@ func (c Conn) start() {
 			continue
 		}
 
-		if err := tls.WriteRecordInPlace(c.Conn, buf[:], n); err != nil {
+		if err := tls.WriteRecordInPlace(c.Conn, buf, n); err != nil {
 			c.p.ctxCancel(err)
 			return
 		}

mtglib/internal/relay/relay.go (+14, -5)

@@ -4,11 +4,19 @@ import (
 	"context"
 	"errors"
 	"io"
+	"sync"
 
 	"github.com/9seconds/mtg/v2/essentials"
 	"github.com/9seconds/mtg/v2/mtglib/internal/tls"
 )
 
+var bufPool = sync.Pool{
+	New: func() any {
+		b := make([]byte, tls.MaxRecordPayloadSize)
+		return &b
+	},
+}
+
 func Relay(ctx context.Context, log Logger, telegramConn, clientConn essentials.Conn) {
 	defer telegramConn.Close() //nolint: errcheck
 	defer clientConn.Close()   //nolint: errcheck
@@ -16,11 +24,11 @@ func Relay(ctx context.Context, log Logger, telegramConn, clientConn essentials.
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
 
-	go func() {
-		<-ctx.Done()
+	stop := context.AfterFunc(ctx, func() {
 		telegramConn.Close() //nolint: errcheck
 		clientConn.Close()   //nolint: errcheck
-	}()
+	})
+	defer stop()
 
 	closeChan := make(chan struct{})
 
@@ -36,12 +44,13 @@ func Relay(ctx context.Context, log Logger, telegramConn, clientConn essentials.
 }
 
 func pump(log Logger, src, dst essentials.Conn, direction string) {
-	var buf [tls.MaxRecordPayloadSize]byte
+	bp := bufPool.Get().(*[]byte)
+	defer bufPool.Put(bp)
 
 	defer src.CloseRead()  //nolint: errcheck
 	defer dst.CloseWrite() //nolint: errcheck
 
-	n, err := io.CopyBuffer(src, dst, buf[:])
+	n, err := io.CopyBuffer(src, dst, *bp)
 
 	switch {
 	case err == nil:

mtglib/proxy.go (+3, -3)

@@ -65,10 +65,10 @@ func (p *Proxy) ServeConn(conn essentials.Conn) {
 	ctx := newStreamContext(p.ctx, p.logger, conn)
 	defer ctx.Close()
 
-	go func() {
-		<-ctx.Done()
+	stop := context.AfterFunc(ctx, func() {
 		ctx.Close()
-	}()
+	})
+	defer stop()
 
 	p.eventStream.Send(ctx, NewEventStart(ctx.streamID, ctx.ClientIP()))
 	ctx.logger.Info("Stream has been started")
