Highly-opinionated (ex-bullshit-free) MTPROTO proxy for Telegram. If you use v1.0 or the upgrade broke your proxy, please read the chapter Version 2
Вы не можете выбрать более 25 тем Темы должны начинаться с буквы или цифры, могут содержать дефисы(-) и должны содержать не более 35 символов.

stack_bench_test.go 4.4KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172
  1. package relay
  2. import (
  3. "fmt"
  4. "runtime"
  5. "sync"
  6. "testing"
  7. "github.com/dolonet/mtg-multi/mtglib/internal/tls"
  8. )
  9. // BenchmarkStackVsPool measures memory consumption when N goroutines hold
  10. // either a stack-allocated buffer or a pool-allocated buffer.
  11. // Each goroutine simulates one pump direction of a relay connection.
  12. // Real connections have 2 pumps each, so N goroutines ≈ N/2 connections.
  13. func BenchmarkStackMemory(b *testing.B) {
  14. for _, numGoroutines := range []int{100, 500, 1000, 2000} {
  15. b.Run(fmt.Sprintf("goroutines=%d", numGoroutines), func(b *testing.B) {
  16. for i := 0; i < b.N; i++ {
  17. var memBefore, memAfter runtime.MemStats
  18. runtime.GC()
  19. runtime.ReadMemStats(&memBefore)
  20. var wg sync.WaitGroup
  21. ready := make(chan struct{}, numGoroutines)
  22. stop := make(chan struct{})
  23. wg.Add(numGoroutines)
  24. for j := 0; j < numGoroutines; j++ {
  25. go blockingReadStack(&wg, ready, stop)
  26. }
  27. // Wait for all goroutines to be ready (holding their buffers)
  28. for j := 0; j < numGoroutines; j++ {
  29. <-ready
  30. }
  31. runtime.ReadMemStats(&memAfter)
  32. stackDelta := memAfter.StackInuse - memBefore.StackInuse
  33. heapDelta := memAfter.HeapInuse - memBefore.HeapInuse
  34. totalDelta := stackDelta + heapDelta
  35. b.ReportMetric(float64(stackDelta), "stack_bytes")
  36. b.ReportMetric(float64(heapDelta), "heap_bytes")
  37. b.ReportMetric(float64(totalDelta), "total_bytes")
  38. b.ReportMetric(float64(stackDelta)/float64(numGoroutines), "stack_per_goroutine")
  39. close(stop)
  40. wg.Wait()
  41. }
  42. })
  43. }
  44. }
// BenchmarkPoolMemory_16KB measures pooled-buffer memory usage with buffers
// sized to tls.MaxRecordPayloadSize (the size the relay actually transfers).
func BenchmarkPoolMemory_16KB(b *testing.B) {
	benchmarkPoolMemory(b, tls.MaxRecordPayloadSize)
}
// BenchmarkPoolMemory_4KB measures pooled-buffer memory usage with smaller
// 4 KiB buffers, as a comparison point against the 16 KiB variant.
func BenchmarkPoolMemory_4KB(b *testing.B) {
	benchmarkPoolMemory(b, 4096)
}
  51. func benchmarkPoolMemory(b *testing.B, poolBufSize int) {
  52. b.Helper()
  53. pool := &sync.Pool{
  54. New: func() any {
  55. buf := make([]byte, poolBufSize)
  56. return &buf
  57. },
  58. }
  59. for _, numGoroutines := range []int{100, 500, 1000, 2000} {
  60. b.Run(fmt.Sprintf("goroutines=%d", numGoroutines), func(b *testing.B) {
  61. for i := 0; i < b.N; i++ {
  62. var memBefore, memAfter runtime.MemStats
  63. // Ensure pool is empty
  64. runtime.GC()
  65. runtime.ReadMemStats(&memBefore)
  66. var wg sync.WaitGroup
  67. ready := make(chan struct{}, numGoroutines)
  68. stop := make(chan struct{})
  69. wg.Add(numGoroutines)
  70. for j := 0; j < numGoroutines; j++ {
  71. go blockingReadPool(&wg, ready, stop, pool)
  72. }
  73. for j := 0; j < numGoroutines; j++ {
  74. <-ready
  75. }
  76. runtime.ReadMemStats(&memAfter)
  77. stackDelta := memAfter.StackInuse - memBefore.StackInuse
  78. heapDelta := memAfter.HeapInuse - memBefore.HeapInuse
  79. totalDelta := stackDelta + heapDelta
  80. b.ReportMetric(float64(stackDelta), "stack_bytes")
  81. b.ReportMetric(float64(heapDelta), "heap_bytes")
  82. b.ReportMetric(float64(totalDelta), "total_bytes")
  83. b.ReportMetric(float64(stackDelta)/float64(numGoroutines), "stack_per_goroutine")
  84. close(stop)
  85. wg.Wait()
  86. }
  87. })
  88. }
  89. }
  90. // BenchmarkPoolMemory_Burst tests the scenario 9seconds described:
  91. // connections come in bursts, pool holds unused buffers between bursts.
  92. func BenchmarkPoolMemory_Burst(b *testing.B) {
  93. for _, poolBufSize := range []int{4096, 16379} {
  94. b.Run(fmt.Sprintf("poolBuf=%d", poolBufSize), func(b *testing.B) {
  95. pool := &sync.Pool{
  96. New: func() any {
  97. buf := make([]byte, poolBufSize)
  98. return &buf
  99. },
  100. }
  101. for i := 0; i < b.N; i++ {
  102. // Burst 1: 500 goroutines
  103. var wg sync.WaitGroup
  104. ready := make(chan struct{}, 500)
  105. stop := make(chan struct{})
  106. wg.Add(500)
  107. for j := 0; j < 500; j++ {
  108. go blockingReadPool(&wg, ready, stop, pool)
  109. }
  110. for j := 0; j < 500; j++ {
  111. <-ready
  112. }
  113. close(stop)
  114. wg.Wait()
  115. // Between bursts: measure idle pool memory
  116. var memAfterBurst runtime.MemStats
  117. runtime.ReadMemStats(&memAfterBurst)
  118. // Burst 2: 500 goroutines again (pool should reuse)
  119. ready2 := make(chan struct{}, 500)
  120. stop2 := make(chan struct{})
  121. wg.Add(500)
  122. for j := 0; j < 500; j++ {
  123. go blockingReadPool(&wg, ready2, stop2, pool)
  124. }
  125. for j := 0; j < 500; j++ {
  126. <-ready2
  127. }
  128. var memDuringBurst2 runtime.MemStats
  129. runtime.ReadMemStats(&memDuringBurst2)
  130. b.ReportMetric(float64(memAfterBurst.HeapInuse), "idle_heap_bytes")
  131. b.ReportMetric(float64(memDuringBurst2.HeapInuse), "burst2_heap_bytes")
  132. b.ReportMetric(float64(memDuringBurst2.StackInuse), "burst2_stack_bytes")
  133. close(stop2)
  134. wg.Wait()
  135. }
  136. })
  137. }
  138. }