Highly-opinionated (bullshit-free) MTPROTO proxy for Telegram. If you are using v1.0, or an upgrade broke your proxy, please read the chapter "Version 2".
You cannot select more than 25 topics. Topics must start with a letter or number, may contain hyphens (-), and may be at most 35 characters long.

  1. package main
  2. import (
  3. "crypto/rand"
  4. "flag"
  5. "fmt"
  6. "io"
  7. "net"
  8. "os"
  9. "runtime"
  10. "runtime/debug"
  11. "sync"
  12. "sync/atomic"
  13. "time"
  14. )
// TLS record sizing constants shared by the buffer strategies below.
const (
	// maxRecordPayloadSize is the size of every relay copy buffer; both the
	// stack and pool strategies allocate exactly this many bytes per pump.
	maxRecordPayloadSize = 16379
	// maxRecordSize is the full TLS record size (payload + framing overhead).
	// NOTE(review): appears unused in this chunk of the file — presumably kept
	// to document the relationship to maxRecordPayloadSize; confirm before removing.
	maxRecordSize = 16384
)
// --- Buffer strategies ---

// bufStrategy abstracts how a relay direction obtains its copy buffer, so the
// benchmark can drive identical traffic through different allocation schemes.
type bufStrategy interface {
	// Name returns a short label used in benchmark output.
	Name() string
	// Pump copies bytes from src to dst until EOF or error, returning the
	// number of bytes copied.
	Pump(src, dst net.Conn) (int64, error)
}
  24. // Stack-allocated buffer (current mtg code)
  25. type stackStrategy struct{}
  26. func (stackStrategy) Name() string { return "stack" }
  27. func (stackStrategy) Pump(src, dst net.Conn) (int64, error) {
  28. var buf [maxRecordPayloadSize]byte
  29. return io.CopyBuffer(dst, src, buf[:])
  30. }
// Pool-allocated buffer.
//
// relayPool hands out reusable copy buffers of maxRecordPayloadSize bytes.
// It stores *[]byte rather than []byte so that Get/Put move only a pointer
// through the any interface, avoiding an allocation per round trip.
var relayPool = sync.Pool{
	New: func() any {
		b := make([]byte, maxRecordPayloadSize)
		return &b
	},
}
  38. type poolStrategy struct{}
  39. func (poolStrategy) Name() string { return "pool" }
  40. func (poolStrategy) Pump(src, dst net.Conn) (int64, error) {
  41. bp := relayPool.Get().(*[]byte)
  42. defer relayPool.Put(bp)
  43. return io.CopyBuffer(dst, src, *bp)
  44. }
// --- Memory measurement ---

// memSnapshot captures the runtime memory/GC counters that the benchmark
// compares before and after a run.
type memSnapshot struct {
	StackInuse   uint64 // bytes of stack memory in use (runtime.MemStats.StackInuse)
	HeapInuse    uint64 // bytes of in-use heap spans (runtime.MemStats.HeapInuse)
	HeapAlloc    uint64 // bytes of live heap objects (runtime.MemStats.HeapAlloc)
	NumGC        uint32 // completed GC cycles so far
	PauseTotalNs uint64 // cumulative GC stop-the-world pause, nanoseconds
	NumGoroutine int    // goroutines alive at snapshot time
}
  54. func snapMem() memSnapshot {
  55. runtime.GC()
  56. var m runtime.MemStats
  57. runtime.ReadMemStats(&m)
  58. return memSnapshot{
  59. StackInuse: m.StackInuse,
  60. HeapInuse: m.HeapInuse,
  61. HeapAlloc: m.HeapAlloc,
  62. NumGC: m.NumGC,
  63. PauseTotalNs: m.PauseTotalNs,
  64. NumGoroutine: runtime.NumGoroutine(),
  65. }
  66. }
// --- Test harness ---

// runTest benchmarks one buffer strategy end-to-end over real loopback TCP:
// N clients -> relay (which pumps each direction via strat) -> echo server.
// It prints throughput, peak memory, and GC statistics for the run.
//
// Parameters:
//   - strat: the buffer strategy under test.
//   - conns: number of concurrent client connections to drive.
//   - dataPerConn: bytes each client writes (and reads back) through the relay.
//   - reportInterval: progress print cadence; <= 0 disables the reporter.
func runTest(strat bufStrategy, conns int, dataPerConn int64, reportInterval time.Duration) {
	fmt.Printf("\n=== %s strategy, %d connections, %s per conn ===\n",
		strat.Name(), conns, formatBytes(dataPerConn))

	// Start "telegram" echo servers - one listener, accepts all.
	echoLn, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		fmt.Fprintf(os.Stderr, "echo listen: %v\n", err)
		return
	}
	defer echoLn.Close()
	echoAddr := echoLn.Addr().String()

	// Echo server goroutines: one accept loop, one echo goroutine per conn.
	// NOTE(review): echoWg.Add runs inside the accept goroutine, so in theory
	// an Add could race the echoWg.Wait at the end of this function; in
	// practice the listener is closed before Wait, which stops new accepts —
	// confirm that ordering is sufficient for your tolerance.
	var echoWg sync.WaitGroup
	go func() {
		for {
			c, err := echoLn.Accept()
			if err != nil {
				// Listener closed during cleanup; exit the accept loop.
				return
			}
			echoWg.Add(1)
			go func(c net.Conn) {
				defer echoWg.Done()
				defer c.Close()
				io.Copy(c, c) //nolint: errcheck
			}(c)
		}
	}()

	// Start relay listener.
	relayLn, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		fmt.Fprintf(os.Stderr, "relay listen: %v\n", err)
		return
	}
	defer relayLn.Close()
	relayAddr := relayLn.Addr().String()

	// Relay server: for each client, dial the echo server and pump both
	// directions concurrently using the strategy under test.
	var relayWg sync.WaitGroup
	go func() {
		for {
			client, err := relayLn.Accept()
			if err != nil {
				return
			}
			relayWg.Add(1)
			go func(client net.Conn) {
				defer relayWg.Done()
				defer client.Close()
				tg, err := net.Dial("tcp", echoAddr)
				if err != nil {
					return
				}
				defer tg.Close()
				// Bidirectional relay (like mtg relay.Relay).
				done := make(chan struct{})
				go func() {
					defer close(done)
					strat.Pump(client, tg) //nolint: errcheck
					// When one direction is done, close both to unblock the other.
					client.Close() //nolint: errcheck
					tg.Close()     //nolint: errcheck
				}()
				strat.Pump(tg, client) //nolint: errcheck
				client.Close()         //nolint: errcheck
				tg.Close()             //nolint: errcheck
				// Wait for the opposite direction to finish before releasing
				// the relay goroutine.
				<-done
			}(client)
		}
	}()

	// Force GC and take the memory baseline before any client traffic.
	debug.SetGCPercent(100)
	runtime.GC()
	runtime.GC()
	time.Sleep(50 * time.Millisecond)
	before := snapMem()

	// Launch clients.
	var (
		totalBytes  atomic.Int64    // bytes transferred (write + read-back, both counted)
		clientWg    sync.WaitGroup  // tracks client goroutines
		startSignal = make(chan struct{}) // closed to release all clients at once
		peakMem     atomic.Uint64   // max observed StackInuse+HeapInuse during the run
	)

	// Memory sampler: polls MemStats every 10ms and records the peak via a
	// CAS loop (so concurrent samples never lower the recorded maximum).
	samplerDone := make(chan struct{})
	samplerStopped := make(chan struct{})
	go func() {
		defer close(samplerStopped)
		ticker := time.NewTicker(10 * time.Millisecond)
		defer ticker.Stop()
		for {
			select {
			case <-samplerDone:
				return
			case <-ticker.C:
				var m runtime.MemStats
				runtime.ReadMemStats(&m)
				total := m.StackInuse + m.HeapInuse
				for {
					old := peakMem.Load()
					if total <= old || peakMem.CompareAndSwap(old, total) {
						break
					}
				}
			}
		}
	}()

	for i := 0; i < conns; i++ {
		clientWg.Add(1)
		go func() {
			defer clientWg.Done()
			// Block until every client goroutine is ready, so the measured
			// interval starts with all connections live.
			<-startSignal
			conn, err := net.Dial("tcp", relayAddr)
			if err != nil {
				fmt.Fprintf(os.Stderr, "client dial: %v\n", err)
				return
			}
			defer conn.Close()
			// Write data in chunks, read it back (echo). The client runs in
			// lockstep: it never writes the next chunk until the previous one
			// has been fully echoed back, so Read can never over-read.
			chunk := make([]byte, 4096)
			rand.Read(chunk) //nolint: errcheck
			readBuf := make([]byte, 4096)
			var written int64
			for written < dataPerConn {
				toWrite := int64(len(chunk))
				if written+toWrite > dataPerConn {
					// Final partial chunk.
					toWrite = dataPerConn - written
				}
				n, err := conn.Write(chunk[:toWrite])
				if err != nil {
					return
				}
				written += int64(n)
				// Read back echo until all n bytes return.
				remaining := n
				for remaining > 0 {
					rn, err := conn.Read(readBuf)
					if err != nil {
						return
					}
					remaining -= rn
				}
				totalBytes.Add(int64(n * 2)) // write + read
			}
		}()
	}

	start := time.Now()
	close(startSignal)

	// Progress reporter (optional): prints cumulative transfer and rate.
	reporterDone := make(chan struct{})
	if reportInterval > 0 {
		go func() {
			ticker := time.NewTicker(reportInterval)
			defer ticker.Stop()
			for {
				select {
				case <-reporterDone:
					return
				case <-ticker.C:
					elapsed := time.Since(start)
					bytes := totalBytes.Load()
					fmt.Printf(" [%.1fs] %s transferred, %.1f MB/s\n",
						elapsed.Seconds(), formatBytes(bytes),
						float64(bytes)/elapsed.Seconds()/1024/1024)
				}
			}
		}()
	}

	clientWg.Wait()
	close(reporterDone)
	elapsed := time.Since(start)

	// Stop sampler before taking the "after" snapshot so it cannot observe
	// the snapshot's own GC.
	close(samplerDone)
	<-samplerStopped
	after := snapMem()

	// Results.
	bytes := totalBytes.Load()
	throughput := float64(bytes) / elapsed.Seconds() / 1024 / 1024
	gcCycles := after.NumGC - before.NumGC
	gcPause := time.Duration(after.PauseTotalNs - before.PauseTotalNs)
	peak := peakMem.Load()
	baseMem := before.StackInuse + before.HeapInuse
	fmt.Printf("\nResults:\n")
	fmt.Printf(" Duration: %v\n", elapsed.Round(time.Millisecond))
	fmt.Printf(" Total data: %s\n", formatBytes(bytes))
	fmt.Printf(" Throughput: %.1f MB/s\n", throughput)
	fmt.Printf(" Peak memory: %s (baseline %s, delta %s)\n",
		formatBytes(int64(peak)), formatBytes(int64(baseMem)),
		formatBytes(int64(peak)-int64(baseMem)))
	fmt.Printf(" Stack (before): %s → (after): %s\n",
		formatBytes(int64(before.StackInuse)), formatBytes(int64(after.StackInuse)))
	fmt.Printf(" Heap (before): %s → (after): %s\n",
		formatBytes(int64(before.HeapInuse)), formatBytes(int64(after.HeapInuse)))
	fmt.Printf(" Goroutines: %d → %d\n", before.NumGoroutine, after.NumGoroutine)
	fmt.Printf(" GC cycles: %d\n", gcCycles)
	fmt.Printf(" GC total pause: %v\n", gcPause)
	if gcCycles > 0 {
		fmt.Printf(" GC avg pause: %v\n", gcPause/time.Duration(gcCycles))
	}

	// Cleanup: closing the listeners unblocks both accept loops, then wait
	// for all relay/echo goroutines to drain before returning.
	relayLn.Close()
	echoLn.Close()
	relayWg.Wait()
	echoWg.Wait()
	runtime.GC()
	time.Sleep(100 * time.Millisecond)
}
  273. func formatBytes(b int64) string {
  274. switch {
  275. case b >= 1024*1024*1024:
  276. return fmt.Sprintf("%.1f GB", float64(b)/1024/1024/1024)
  277. case b >= 1024*1024:
  278. return fmt.Sprintf("%.1f MB", float64(b)/1024/1024)
  279. case b >= 1024:
  280. return fmt.Sprintf("%.1f KB", float64(b)/1024)
  281. default:
  282. return fmt.Sprintf("%d B", b)
  283. }
  284. }
  285. func main() {
  286. conns := flag.Int("conns", 500, "number of concurrent connections")
  287. dataMB := flag.Int("data", 1, "MB of data per connection")
  288. strategy := flag.String("strategy", "both", "buffer strategy: stack, pool, or both")
  289. flag.Parse()
  290. dataPerConn := int64(*dataMB) * 1024 * 1024
  291. fmt.Printf("Real network relay benchmark\n")
  292. fmt.Printf("GOMAXPROCS=%d, OS=%s/%s\n", runtime.GOMAXPROCS(0), runtime.GOOS, runtime.GOARCH)
  293. fmt.Printf("Connections: %d, Data per conn: %s\n\n", *conns, formatBytes(dataPerConn))
  294. switch *strategy {
  295. case "stack":
  296. runTest(stackStrategy{}, *conns, dataPerConn, 2*time.Second)
  297. case "pool":
  298. runTest(poolStrategy{}, *conns, dataPerConn, 2*time.Second)
  299. case "both":
  300. runTest(stackStrategy{}, *conns, dataPerConn, 2*time.Second)
  301. fmt.Println("\n" + "============================================================")
  302. runTest(poolStrategy{}, *conns, dataPerConn, 2*time.Second)
  303. }
  304. }