Procházet zdrojové kódy

Merge pull request #358 from 9seconds/doppel-drs

Make DRS optional
tags/v2.2.0^2^2
Sergei Arkhipov před 1 měsícem
rodič
revize
6d8d2961e8
Žádný účet není propojen s e-mailovou adresou tvůrce revize

+ 31
- 32
example.config.toml Zobrazit soubor

@@ -213,42 +213,20 @@ idle = "1m"
213 213
 # does not really matter), while websites pump heavy content in HTTP2 streams
214 214
 #
215 215
 # It means that statistically there is a difference between traffic shapes:
216
-# TLS packet sizes are different, delays between packets are also different.
216
+# delays between packets are also different.
217 217
 # In order to avoid censorship detection based on these patterns, there is a
218 218
 # mtg subsystem called "Doppelganger" that aims to mimic website statistics
219 219
 # as close as it could.
220 220
 #
221
-# It does that by 2 ideas:
222
-#   1. Delays between TLS packets are not constant. There are many factors
223
-#      that come in play. Application should generate some response, it could
224
-#      send some headers first and stream content with chunked encoding. So
225
-#      some first packets could come as soon as possible, with some delays
226
-#      after first ones. Such phenomenon is described by different statistic
227
-#      distribution. There are 2 distribution that describe it: lognormal
228
-#      distribution and Weibul distribution. Lognormal is all about steady streams
229
-#      of heavy content like a video. Weibul is great about short bursts like
230
-#      user who requested a static page an a couple of images.
231
-#
232
-#      mtg tries to adapt Weibul distribution. It comes with some sensible
233
-#      defaults that were taken from ok.ru. But when you use domain fronting,
234
-#      it always make sense to take statistics from that website. You can specify
235
-#      some urls here. mtg will crawl them from time to time, accumulate time
236
-#      series and approximates parameters for Weibul.
237
-#   2. TLS record sizes are not random.
238
-#      https://blog.cloudflare.com/optimizing-tls-over-tcp-to-reduce-latency/
239
-#      https://aws.github.io/s2n-tls/usage-guide/ch08-record-sizes.html
240
-#
241
-#      The idea is that huge TLS records could negatively affect performance.
242
-#      You cannot simply decrypt a part of the packet, you need to wait it
243
-#      whole, and huge packets could involve several RTTs if you do not use
244
-#      any specific software that treat TLS in a very special way. So
245
-#      servers start with small packets, usually around MTU, and ramp up
246
-#      later. This optimizes a time-to-first byte so web browsers start to
247
-#      render early.
248
-#
249
-#      mtg uses the same technique as was introduced by Cloudflare in their
250
-#      patches to nginx 10 years ago:
251
-#      https://github.com/cloudflare/sslconfig/blob/master/patches/nginx__dynamic_tls_records.patch
221
+# Delays between TLS packets are not constant. There are many factors
222
+# that come into play. Application should generate some response, it could
223
+# send some headers first and stream content with chunked encoding. So
224
+# some first packets could come as soon as possible, with some delays
225
+# after first ones. Such a phenomenon is described by a different statistical
226
+# distribution. There are 2 distributions that describe it: lognormal
227
+# distribution and Weibull distribution. Lognormal is all about steady streams
228
+# of heavy content like a video. Weibull is great for short bursts like a
229
+# user who requested a static page and a couple of images.
252 230
 [defense.doppelganger]
253 231
 # This is a list of URLs that would be crawled by mtg to approximate delay
254 232
 # statistics. They MUST be HTTPS urls.
@@ -266,6 +244,27 @@ repeats-per-raid = 10
266 244
 # do not change a lot, so do not expect different results if you request
267 245
 # each 10 minutes.
268 246
 raid-each = "6h"
247
+# This enables dynamic TLS record sizing.
248
+#
249
+# Some modern stacks and platforms start to use the technique that is called
250
+# DRS. They start with small TLS packets and ramp up eventually. First packets
251
+# are usually about MTU size, after that we get 4k and eventually max size.
252
+# This is done with a good intention: to minimize the time to the first byte,
253
+# so application could start doing something with the data right after first
254
+# RTT.
255
+#
256
+# Apparently, about 90% of application do not employ this technique, they use
257
+# max size always: nginx, apache, java stuff. But Golang tools, angie and
258
+# some specific patches activate this technique.
259
+#
260
+# In order to mimic a real website we need to know something about software
261
+# it uses. Usually nobody cares: openssl does 16384, Python does it, nginx
262
+# does it. So this setting is disabled by default.
263
+#
264
+#      https://blog.cloudflare.com/optimizing-tls-over-tcp-to-reduce-latency/
265
+#      https://aws.github.io/s2n-tls/usage-guide/ch08-record-sizes.html
266
+#      https://github.com/cloudflare/sslconfig/blob/master/patches/nginx__dynamic_tls_records.patch
267
+drs = false
269 268
 
270 269
 # Some countries do active probing on Telegram connections. This technique
271 270
 # allows to protect from such effort.

+ 1
- 0
internal/cli/run_proxy.go Zobrazit soubor

@@ -265,6 +265,7 @@ func runProxy(conf *config.Config, version string) error { //nolint: funlen
265 265
 		DoppelGangerURLs:    doppelGangerURLs,
266 266
 		DoppelGangerPerRaid: conf.Defense.Doppelganger.Repeats.Get(mtglib.DoppelGangerPerRaid),
267 267
 		DoppelGangerEach:    conf.Defense.Doppelganger.UpdateEach.Get(mtglib.DoppelGangerEach),
268
+		DoppelGangerDRS:     conf.Defense.Doppelganger.DRS.Get(false),
268 269
 	}
269 270
 
270 271
 	proxy, err := mtglib.NewProxy(opts)

+ 1
- 0
internal/config/config.go Zobrazit soubor

@@ -53,6 +53,7 @@ type Config struct {
53 53
 			URLs       []TypeHttpsURL  `json:"urls"`
54 54
 			Repeats    TypeConcurrency `json:"repeats_per_raid"`
55 55
 			UpdateEach TypeDuration    `json:"raid_each"`
56
+			DRS        TypeBool        `json:"drs"`
56 57
 		} `json:"doppelganger"`
57 58
 	} `json:"defense"`
58 59
 	Network struct {

+ 1
- 0
internal/config/parse.go Zobrazit soubor

@@ -48,6 +48,7 @@ type tomlConfig struct {
48 48
 			URLs       []string `toml:"urls" json:"urls,omitempty"`
49 49
 			Repeats    uint     `toml:"repeats-per-raid" json:"repeats_per_raid,omitempty"`
50 50
 			UpdateEach string   `toml:"raid-each" json:"raid_each,omitempty"`
51
+			DRS        bool     `toml:"drs" json:"drs,omitempty"`
51 52
 		} `toml:"doppelganger" json:"doppelganger,omitempty"`
52 53
 	} `toml:"defense" json:"defense,omitempty"`
53 54
 	Network struct {

+ 45
- 10
mtglib/internal/doppel/conn.go Zobrazit soubor

@@ -16,18 +16,48 @@ type Conn struct {
16 16
 }
17 17
 
18 18
 type connPayload struct {
19
-	ctx         context.Context
20
-	ctxCancel   context.CancelCauseFunc
21
-	clock       Clock
22
-	wg          sync.WaitGroup
23
-	writeLock   sync.Mutex
24
-	writeStream bytes.Buffer
19
+	ctx           context.Context
20
+	ctxCancel     context.CancelCauseFunc
21
+	clock         Clock
22
+	wg            sync.WaitGroup
23
+	syncWriteLock sync.RWMutex
24
+	writeStream   bytes.Buffer
25
+	writeCond     *sync.Cond
25 26
 }
26 27
 
27 28
 func (c Conn) Write(p []byte) (int, error) {
28
-	c.p.writeLock.Lock()
29
+	c.p.syncWriteLock.RLock()
30
+	defer c.p.syncWriteLock.RUnlock()
31
+
32
+	c.p.writeCond.L.Lock()
33
+	c.p.writeStream.Write(p)
34
+	c.p.writeCond.L.Unlock()
35
+
36
+	return len(p), context.Cause(c.p.ctx)
37
+}
38
+
39
+func (c Conn) SyncWrite(p []byte) (int, error) {
40
+	c.p.syncWriteLock.Lock()
41
+	defer c.p.syncWriteLock.Unlock()
42
+
43
+	c.p.writeCond.L.Lock()
44
+	// wait until buffer is exhausted
45
+	for c.p.writeStream.Len() != 0 && context.Cause(c.p.ctx) == nil {
46
+		c.p.writeCond.Wait()
47
+	}
29 48
 	c.p.writeStream.Write(p)
30
-	c.p.writeLock.Unlock()
49
+	c.p.writeCond.L.Unlock()
50
+
51
+	if err := context.Cause(c.p.ctx); err != nil {
52
+		return len(p), err
53
+	}
54
+
55
+	c.p.writeCond.L.Lock()
56
+	// wait until the data has been sent
57
+	for c.p.writeStream.Len() != 0 && context.Cause(c.p.ctx) == nil {
58
+		c.p.writeCond.Wait()
59
+	}
60
+	c.p.writeCond.L.Unlock()
31 61
 
32 62
 	return len(p), context.Cause(c.p.ctx)
33 63
 }
@@ -39,6 +69,8 @@ func (c Conn) Start() {
39 69
 }
40 70
 
41 71
 func (c Conn) start() {
72
+	defer c.p.writeCond.Broadcast()
73
+
42 74
 	buf := [tls.MaxRecordSize]byte{}
43 75
 
44 76
 	for {
@@ -48,9 +80,9 @@ func (c Conn) start() {
48 80
 		case <-c.p.clock.tick:
49 81
 		}
50 82
 
51
-		c.p.writeLock.Lock()
83
+		c.p.writeCond.L.Lock()
52 84
 		n, err := c.p.writeStream.Read(buf[:c.p.clock.stats.Size()])
53
-		c.p.writeLock.Unlock()
85
+		c.p.writeCond.L.Unlock()
54 86
 
55 87
 		if n == 0 || err != nil {
56 88
 			continue
@@ -60,6 +92,8 @@ func (c Conn) start() {
60 92
 			c.p.ctxCancel(err)
61 93
 			return
62 94
 		}
95
+
96
+		c.p.writeCond.Signal()
63 97
 	}
64 98
 }
65 99
 
@@ -75,6 +109,7 @@ func NewConn(ctx context.Context, conn essentials.Conn, stats *Stats) Conn {
75 109
 		p: &connPayload{
76 110
 			ctx:       ctx,
77 111
 			ctxCancel: cancel,
112
+			writeCond: sync.NewCond(&sync.Mutex{}),
78 113
 			clock: Clock{
79 114
 				stats: stats,
80 115
 				tick:  make(chan struct{}),

+ 130
- 0
mtglib/internal/doppel/conn_test.go Zobrazit soubor

@@ -157,6 +157,136 @@ func (suite *ConnTestSuite) TestStopOnUnderlyingWriteError() {
157 157
 	}, 2*time.Second, time.Millisecond)
158 158
 }
159 159
 
160
+func (suite *ConnTestSuite) TestSyncWriteDataSent() {
161
+	suite.connMock.
162
+		On("Write", mock.AnythingOfType("[]uint8")).
163
+		Return(0, nil).
164
+		Maybe()
165
+
166
+	c := suite.makeConn()
167
+	defer c.Stop()
168
+
169
+	payload := []byte("sync hello")
170
+	n, err := c.SyncWrite(payload)
171
+	suite.NoError(err)
172
+	suite.Equal(len(payload), n)
173
+
174
+	// SyncWrite returns only after data is flushed to the wire.
175
+	assembled := &bytes.Buffer{}
176
+	reader := bytes.NewReader(suite.connMock.Written())
177
+
178
+	for {
179
+		header := make([]byte, tls.SizeHeader)
180
+		if _, err := io.ReadFull(reader, header); err != nil {
181
+			break
182
+		}
183
+
184
+		suite.Equal(byte(tls.TypeApplicationData), header[0])
185
+
186
+		length := binary.BigEndian.Uint16(header[tls.SizeRecordType+tls.SizeVersion:])
187
+		rec := make([]byte, length)
188
+		_, err := io.ReadFull(reader, rec)
189
+		suite.NoError(err)
190
+
191
+		assembled.Write(rec)
192
+	}
193
+
194
+	suite.Equal(payload, assembled.Bytes())
195
+}
196
+
197
+func (suite *ConnTestSuite) TestSyncWriteDrainsBufferFirst() {
198
+	suite.connMock.
199
+		On("Write", mock.AnythingOfType("[]uint8")).
200
+		Return(0, nil).
201
+		Maybe()
202
+
203
+	c := suite.makeConn()
204
+	defer c.Stop()
205
+
206
+	// Buffer some data via async Write.
207
+	_, err := c.Write([]byte("first"))
208
+	suite.NoError(err)
209
+
210
+	// SyncWrite must drain "first" before sending "second".
211
+	n, err := c.SyncWrite([]byte("second"))
212
+	suite.NoError(err)
213
+	suite.Equal(6, n)
214
+
215
+	// All data should be on the wire now.
216
+	assembled := &bytes.Buffer{}
217
+	reader := bytes.NewReader(suite.connMock.Written())
218
+
219
+	for {
220
+		header := make([]byte, tls.SizeHeader)
221
+		if _, err := io.ReadFull(reader, header); err != nil {
222
+			break
223
+		}
224
+
225
+		length := binary.BigEndian.Uint16(header[tls.SizeRecordType+tls.SizeVersion:])
226
+		rec := make([]byte, length)
227
+		_, err := io.ReadFull(reader, rec)
228
+		suite.NoError(err)
229
+
230
+		assembled.Write(rec)
231
+	}
232
+
233
+	suite.Equal([]byte("firstsecond"), assembled.Bytes())
234
+}
235
+
236
+func (suite *ConnTestSuite) TestSyncWriteBlocksAsyncWrite() {
237
+	suite.connMock.
238
+		On("Write", mock.AnythingOfType("[]uint8")).
239
+		Return(0, nil).
240
+		Maybe()
241
+
242
+	c := suite.makeConn()
243
+	defer c.Stop()
244
+
245
+	// Start SyncWrite — it holds exclusive lock.
246
+	syncDone := make(chan struct{})
247
+
248
+	go func() {
249
+		defer close(syncDone)
250
+		c.SyncWrite([]byte("exclusive")) //nolint: errcheck
251
+	}()
252
+
253
+	// Give SyncWrite time to acquire the lock.
254
+	time.Sleep(10 * time.Millisecond)
255
+
256
+	// Async Write should block until SyncWrite completes.
257
+	writeDone := make(chan struct{})
258
+
259
+	go func() {
260
+		defer close(writeDone)
261
+		c.Write([]byte("blocked")) //nolint: errcheck
262
+	}()
263
+
264
+	// SyncWrite should finish first.
265
+	<-syncDone
266
+
267
+	select {
268
+	case <-writeDone:
269
+		// Write completed after SyncWrite — correct.
270
+	case <-time.After(2 * time.Second):
271
+		suite.Fail("async Write did not unblock after SyncWrite completed")
272
+	}
273
+}
274
+
275
+func (suite *ConnTestSuite) TestSyncWriteReturnsErrorAfterStop() {
276
+	suite.connMock.
277
+		On("Write", mock.AnythingOfType("[]uint8")).
278
+		Return(0, nil).
279
+		Maybe()
280
+
281
+	c := suite.makeConn()
282
+	c.Stop()
283
+
284
+	time.Sleep(10 * time.Millisecond)
285
+
286
+	_, err := c.SyncWrite([]byte("too late"))
287
+	suite.Error(err)
288
+}
289
+
160 290
 func TestConn(t *testing.T) {
161 291
 	t.Parallel()
162 292
 	suite.Run(t, &ConnTestSuite{})

+ 6
- 1
mtglib/internal/doppel/ganger.go Zobrazit soubor

@@ -29,6 +29,8 @@ type Ganger struct {
29 29
 	scoutRaidEach    time.Duration
30 30
 	scoutRaidRepeats int
31 31
 
32
+	drs bool
33
+
32 34
 	stats     *Stats
33 35
 	durations []time.Duration
34 36
 
@@ -107,7 +109,7 @@ func (g *Ganger) run() {
107 109
 			g.wg.Go(func() {
108 110
 				select {
109 111
 				case <-g.ctx.Done():
110
-				case updatedStatsChan <- NewStats(durations):
112
+				case updatedStatsChan <- NewStats(durations, g.drs):
111 113
 				}
112 114
 			})
113 115
 		case stats := <-updatedStatsChan:
@@ -152,6 +154,7 @@ func NewGanger(
152 154
 	scoutEach time.Duration,
153 155
 	scoutRepeats int,
154 156
 	urls []string,
157
+	drs bool,
155 158
 ) *Ganger {
156 159
 	ctx, cancel := context.WithCancel(ctx)
157 160
 
@@ -169,9 +172,11 @@ func NewGanger(
169 172
 		logger:           logger,
170 173
 		scoutRaidEach:    scoutEach,
171 174
 		scoutRaidRepeats: scoutRepeats,
175
+		drs:              drs,
172 176
 		stats: &Stats{
173 177
 			k:      StatsDefaultK,
174 178
 			lambda: StatsDefaultLambda,
179
+			drs:    drs,
175 180
 		},
176 181
 		scout:        NewScout(network, urls),
177 182
 		connRequests: make(chan gangerConnRequest),

+ 1
- 1
mtglib/internal/doppel/ganger_test.go Zobrazit soubor

@@ -29,7 +29,7 @@ func (suite *GangerTestSuite) SetupTest() {
29 29
 		On("WarningError", mock.AnythingOfType("string"), mock.Anything).
30 30
 		Maybe()
31 31
 
32
-	suite.g = NewGanger(suite.ctx, suite.network, suite.log, time.Hour, 1, suite.urls)
32
+	suite.g = NewGanger(suite.ctx, suite.network, suite.log, time.Hour, 1, suite.urls, true)
33 33
 	suite.g.Run()
34 34
 }
35 35
 

+ 2
- 2
mtglib/internal/doppel/init.go Zobrazit soubor

@@ -14,8 +14,8 @@ const (
14 14
 	// Please see Stats description
15 15
 	// https://blog.cloudflare.com/optimizing-tls-over-tcp-to-reduce-latency/
16 16
 	// https://github.com/cloudflare/sslconfig/blob/master/patches/nginx__dynamic_tls_records.patch
17
-	TLSRecordSizeStart = 1369
18
-	TLSRecordSizeAccel = 4229
17
+	TLSRecordSizeStart = 1450
18
+	TLSRecordSizeAccel = 4096
19 19
 	TLSRecordSizeMax   = 16384 - tls.SizeHeader
20 20
 
21 21
 	TLSCounterAccelAfter = 40

+ 14
- 3
mtglib/internal/doppel/stats.go Zobrazit soubor

@@ -17,6 +17,9 @@ const (
17 17
 	// these values are taken from ok.ru. measured from moscow site.
18 18
 	StatsDefaultK      = 0.37846373895785335
19 19
 	StatsDefaultLambda = 1.73177086015485
20
+
21
+	// how many bytes the record size is randomly drifted by
22
+	DRSNoise = 100
20 23
 )
21 24
 
22 25
 // Stats is responsible for generating values that are distributed according
@@ -66,6 +69,9 @@ type Stats struct {
66 69
 	k float64
67 70
 	// https://en.wikipedia.org/wiki/Scale_parameter
68 71
 	lambda float64
72
+
73
+	// Dynamic Record Sizing
74
+	drs bool
69 75
 }
70 76
 
71 77
 func (d *Stats) Delay() time.Duration {
@@ -84,20 +90,24 @@ func (d *Stats) Size() int {
84 90
 		d.sizeCounter = 0
85 91
 	}
86 92
 
93
+	if !d.drs {
94
+		return TLSRecordSizeMax
95
+	}
96
+
87 97
 	d.sizeLastRequested = time.Now()
88 98
 	d.sizeCounter++
89 99
 
90 100
 	switch {
91 101
 	case d.sizeCounter <= TLSCounterAccelAfter:
92
-		return TLSRecordSizeStart
102
+		return TLSRecordSizeStart - rand.IntN(DRSNoise)
93 103
 	case d.sizeCounter <= TLSCounterMaxAfter:
94
-		return TLSRecordSizeAccel
104
+		return TLSRecordSizeAccel - rand.IntN(DRSNoise)
95 105
 	}
96 106
 
97 107
 	return TLSRecordSizeMax
98 108
 }
99 109
 
100
-func NewStats(durations []time.Duration) *Stats {
110
+func NewStats(durations []time.Duration, drs bool) *Stats {
101 111
 	n := float64(len(durations))
102 112
 
103 113
 	// in milliseconds
@@ -150,5 +160,6 @@ func NewStats(durations []time.Duration) *Stats {
150 160
 	return &Stats{
151 161
 		k:      k,
152 162
 		lambda: lambda,
163
+		drs:    drs,
153 164
 	}
154 165
 }

+ 37
- 12
mtglib/internal/doppel/stats_test.go Zobrazit soubor

@@ -38,7 +38,7 @@ func (suite *StatsTestSuite) TestNewStatsRecoverParameters() {
38 38
 	knownLambda := 100.0
39 39
 
40 40
 	samples := suite.GenWeibull(knownK, knownLambda, 5000, 42)
41
-	stats := NewStats(samples)
41
+	stats := NewStats(samples, true)
42 42
 
43 43
 	suite.InDelta(knownK, stats.k, 0.1)
44 44
 	suite.InDelta(knownLambda, stats.lambda, 5.0)
@@ -50,7 +50,7 @@ func (suite *StatsTestSuite) TestNewStatsExponentialCase() {
50 50
 	knownLambda := 50.0
51 51
 
52 52
 	samples := suite.GenWeibull(knownK, knownLambda, 5000, 123)
53
-	stats := NewStats(samples)
53
+	stats := NewStats(samples, true)
54 54
 
55 55
 	suite.InDelta(knownK, stats.k, 0.1)
56 56
 	suite.InDelta(knownLambda, stats.lambda, 5.0)
@@ -64,7 +64,7 @@ func (suite *StatsTestSuite) TestNewStatsSmallK() {
64 64
 	knownLambda := 100.0
65 65
 
66 66
 	samples := suite.GenWeibull(knownK, knownLambda, 10000, 99)
67
-	stats := NewStats(samples)
67
+	stats := NewStats(samples, true)
68 68
 
69 69
 	suite.InDelta(knownK, stats.k, 0.05)
70 70
 	suite.InDelta(knownLambda, stats.lambda, 5.0)
@@ -76,7 +76,7 @@ func (suite *StatsTestSuite) TestNewStatsLargeK() {
76 76
 	knownLambda := 200.0
77 77
 
78 78
 	samples := suite.GenWeibull(knownK, knownLambda, 5000, 77)
79
-	stats := NewStats(samples)
79
+	stats := NewStats(samples, true)
80 80
 
81 81
 	suite.InDelta(knownK, stats.k, 0.3)
82 82
 	suite.InDelta(knownLambda, stats.lambda, 5.0)
@@ -121,7 +121,7 @@ func (suite *StatsTestSuite) TestNewStatsRoundTrip() {
121 121
 	knownLambda := 80.0
122 122
 
123 123
 	samples := suite.GenWeibull(knownK, knownLambda, 5000, 555)
124
-	stats := NewStats(samples)
124
+	stats := NewStats(samples, true)
125 125
 
126 126
 	n := 50000
127 127
 	sum := 0.0
@@ -138,16 +138,17 @@ func (suite *StatsTestSuite) TestNewStatsRoundTrip() {
138 138
 }
139 139
 
140 140
 func (suite *StatsTestSuite) TestSizeStartPhase() {
141
-	stats := &Stats{k: 1.0, lambda: 1.0}
141
+	stats := &Stats{k: 1.0, lambda: 1.0, drs: true}
142 142
 
143 143
 	for range TLSCounterAccelAfter {
144 144
 		size := stats.Size()
145
-		suite.Equal(TLSRecordSizeStart, size)
145
+		suite.GreaterOrEqual(size, TLSRecordSizeStart-DRSNoise)
146
+		suite.LessOrEqual(size, TLSRecordSizeStart)
146 147
 	}
147 148
 }
148 149
 
149 150
 func (suite *StatsTestSuite) TestSizeAccelPhase() {
150
-	stats := &Stats{k: 1.0, lambda: 1.0}
151
+	stats := &Stats{k: 1.0, lambda: 1.0, drs: true}
151 152
 
152 153
 	for range TLSCounterAccelAfter {
153 154
 		stats.Size()
@@ -155,12 +156,13 @@ func (suite *StatsTestSuite) TestSizeAccelPhase() {
155 156
 
156 157
 	for range TLSCounterMaxAfter - TLSCounterAccelAfter {
157 158
 		size := stats.Size()
158
-		suite.Equal(TLSRecordSizeAccel, size)
159
+		suite.GreaterOrEqual(size, TLSRecordSizeAccel-DRSNoise)
160
+		suite.LessOrEqual(size, TLSRecordSizeAccel)
159 161
 	}
160 162
 }
161 163
 
162 164
 func (suite *StatsTestSuite) TestSizeMaxPhase() {
163
-	stats := &Stats{k: 1.0, lambda: 1.0}
165
+	stats := &Stats{k: 1.0, lambda: 1.0, drs: true}
164 166
 
165 167
 	for range TLSCounterMaxAfter {
166 168
 		stats.Size()
@@ -173,7 +175,7 @@ func (suite *StatsTestSuite) TestSizeMaxPhase() {
173 175
 }
174 176
 
175 177
 func (suite *StatsTestSuite) TestSizeResetsAfterInactivity() {
176
-	stats := &Stats{k: 1.0, lambda: 1.0}
178
+	stats := &Stats{k: 1.0, lambda: 1.0, drs: true}
177 179
 
178 180
 	// Advance past start phase.
179 181
 	for range TLSCounterMaxAfter {
@@ -185,7 +187,30 @@ func (suite *StatsTestSuite) TestSizeResetsAfterInactivity() {
185 187
 	// Simulate inactivity by backdating sizeLastRequested.
186 188
 	stats.sizeLastRequested = time.Now().Add(-TLSRecordSizeResetAfter - time.Millisecond)
187 189
 
188
-	suite.Equal(TLSRecordSizeStart, stats.Size())
190
+	size := stats.Size()
191
+	suite.GreaterOrEqual(size, TLSRecordSizeStart-DRSNoise)
192
+	suite.LessOrEqual(size, TLSRecordSizeStart)
193
+}
194
+
195
+func (suite *StatsTestSuite) TestSizeNoDRSAlwaysMax() {
196
+	stats := &Stats{k: 1.0, lambda: 1.0, drs: false}
197
+
198
+	for range TLSCounterMaxAfter + 20 {
199
+		suite.Equal(TLSRecordSizeMax, stats.Size())
200
+	}
201
+}
202
+
203
+func (suite *StatsTestSuite) TestSizeNoDRSIgnoresCounter() {
204
+	stats := &Stats{k: 1.0, lambda: 1.0, drs: false}
205
+
206
+	// Even after many calls, always returns max.
207
+	for range 200 {
208
+		suite.Equal(TLSRecordSizeMax, stats.Size())
209
+	}
210
+
211
+	// Inactivity has no effect either.
212
+	stats.sizeLastRequested = time.Now().Add(-TLSRecordSizeResetAfter - time.Millisecond)
213
+	suite.Equal(TLSRecordSizeMax, stats.Size())
189 214
 }
190 215
 
191 216
 func TestStats(t *testing.T) {

+ 2
- 1
mtglib/proxy.go Zobrazit soubor

@@ -90,7 +90,7 @@ func (p *Proxy) ServeConn(conn essentials.Conn) {
90 90
 	}
91 91
 	defer clientConn.Stop()
92 92
 
93
-	if _, err := clientConn.Write(noise); err != nil {
93
+	if _, err := clientConn.SyncWrite(noise); err != nil {
94 94
 		ctx.logger.InfoError("cannot send the first packet", err)
95 95
 		return
96 96
 	}
@@ -345,6 +345,7 @@ func NewProxy(opts ProxyOpts) (*Proxy, error) {
345 345
 			opts.DoppelGangerEach,
346 346
 			int(opts.DoppelGangerPerRaid),
347 347
 			opts.DoppelGangerURLs,
348
+			opts.DoppelGangerDRS,
348 349
 		),
349 350
 		configUpdater: dc.NewPublicConfigUpdater(
350 351
 			tg,

+ 3
- 0
mtglib/proxy_opts.go Zobrazit soubor

@@ -157,6 +157,9 @@ type ProxyOpts struct {
157 157
 	// DoppelGangerEach defines a time period between each raid. We recommend
158 158
 	// to use hours here.
159 159
 	DoppelGangerEach time.Duration
160
+
161
+	// DoppelGangerDRS defines if TLS Dynamic Record Sizing is active.
162
+	DoppelGangerDRS bool
160 163
 }
161 164
 
162 165
 func (p ProxyOpts) valid() error {

Načítá se…
Zrušit
Uložit