Explorar el Código

Make DRS optional

tags/v2.2.0^2^2
9seconds hace 2 meses
padre
commit
21d7522356

+ 31
- 32
example.config.toml Ver fichero

213
 # does not really matter), while websites pump heavy content in HTTP2 streams
213
 # does not really matter), while websites pump heavy content in HTTP2 streams
214
 #
214
 #
215
 # It means that statistically there is a difference between traffic shapes:
215
 # It means that statistically there is a difference between traffic shapes:
216
-# TLS packet sizes are different, delays between packets are also different.
216
+# delays between packets are also different.
217
 # In order to avoid censorship detection based on these patterns, there is a
217
 # In order to avoid censorship detection based on these patterns, there is a
218
 # mtg subsystem called "Doppelganger" that aims to mimic website statistics
218
 # mtg subsystem called "Doppelganger" that aims to mimic website statistics
219
 # as closely as it can.
219
 # as closely as it can.
220
 #
220
 #
221
-# It does that by 2 ideas:
222
-#   1. Delays between TLS packets are not constant. There are many factors
223
-#      that come in play. Application should generate some response, it could
224
-#      send some headers first and stream content with chunked encoding. So
225
-#      some first packets could come as soon as possible, with some delays
226
-#      after first ones. Such phenomenon is described by different statistic
227
-#      distribution. There are 2 distribution that describe it: lognormal
228
-#      distribution and Weibul distribution. Lognormal is all about steady streams
229
-#      of heavy content like a video. Weibul is great about short bursts like
230
-#      user who requested a static page an a couple of images.
231
-#
232
-#      mtg tries to adapt Weibul distribution. It comes with some sensible
233
-#      defaults that were taken from ok.ru. But when you use domain fronting,
234
-#      it always make sense to take statistics from that website. You can specify
235
-#      some urls here. mtg will crawl them from time to time, accumulate time
236
-#      series and approximates parameters for Weibul.
237
-#   2. TLS record sizes are not random.
238
-#      https://blog.cloudflare.com/optimizing-tls-over-tcp-to-reduce-latency/
239
-#      https://aws.github.io/s2n-tls/usage-guide/ch08-record-sizes.html
240
-#
241
-#      The idea is that huge TLS records could negatively affect performance.
242
-#      You cannot simply decrypt a part of the packet, you need to wait it
243
-#      whole, and huge packets could involve several RTTs if you do not use
244
-#      any specific software that treat TLS in a very special way. So
245
-#      servers start with small packets, usually around MTU, and ramp up
246
-#      later. This optimizes a time-to-first byte so web browsers start to
247
-#      render early.
248
-#
249
-#      mtg uses the same technique as was introduced by Cloudflare in their
250
-#      patches to nginx 10 years ago:
251
-#      https://github.com/cloudflare/sslconfig/blob/master/patches/nginx__dynamic_tls_records.patch
221
+# Delays between TLS packets are not constant. There are many factors
222
+# that come into play. An application should generate some response; it could
223
+# send some headers first and stream content with chunked encoding. So
224
+# some first packets could come as soon as possible, with some delays
225
+# after the first ones. Such a phenomenon is described by a statistical
226
+# distribution. There are 2 distributions that describe it: the lognormal
227
+# distribution and the Weibull distribution. Lognormal is all about steady
228
+# streams of heavy content like a video. Weibull is great for short bursts
229
+# like a user who requested a static page and a couple of images.
252
 [defense.doppelganger]
230
 [defense.doppelganger]
253
 # This is a list of URLs that would be crawled by mtg to approximate delay
231
 # This is a list of URLs that would be crawled by mtg to approximate delay
254
 # statistics. They MUST be HTTPS urls.
232
 # statistics. They MUST be HTTPS urls.
266
 # do not change a lot, so do not expect different results if you request
244
 # do not change a lot, so do not expect different results if you request
267
 # each 10 minutes.
245
 # each 10 minutes.
268
 raid-each = "6h"
246
 raid-each = "6h"
247
+# This enables dynamic tls record sizing.
248
+#
249
+# Some modern stacks and platforms start to use the technique that is called
250
+# DRS. They start with small TLS packets and ramp up eventually. First packets
251
+# are usually about MTU size, after that we get 4k and eventually max size.
252
+# This is done with a good intention: to minimize a time to the first byte,
253
+# so application could start doing something with the data right after first
254
+# RTT.
255
+#
256
+# Apparently, about 90% of applications do not employ this technique; they
257
+# always use the max size: nginx, apache, java stuff. But Golang tools, angie
258
+# and some specific patches activate this technique.
259
+#
260
+# In order to mimic a real website we need to know something about software
261
+# it uses. Usually nobody cares: openssl does 16384, Python does it, nginx
262
+# does it. So this setting is disabled by default.
263
+#
264
+#      https://blog.cloudflare.com/optimizing-tls-over-tcp-to-reduce-latency/
265
+#      https://aws.github.io/s2n-tls/usage-guide/ch08-record-sizes.html
266
+#      https://github.com/cloudflare/sslconfig/blob/master/patches/nginx__dynamic_tls_records.patch
267
+drs = false
269
 
268
 
270
 # Some countries do active probing on Telegram connections. This technique
269
 # Some countries do active probing on Telegram connections. This technique
271
 # allows to protect from such effort.
270
 # allows to protect from such effort.

+ 1
- 0
internal/cli/run_proxy.go Ver fichero

265
 		DoppelGangerURLs:    doppelGangerURLs,
265
 		DoppelGangerURLs:    doppelGangerURLs,
266
 		DoppelGangerPerRaid: conf.Defense.Doppelganger.Repeats.Get(mtglib.DoppelGangerPerRaid),
266
 		DoppelGangerPerRaid: conf.Defense.Doppelganger.Repeats.Get(mtglib.DoppelGangerPerRaid),
267
 		DoppelGangerEach:    conf.Defense.Doppelganger.UpdateEach.Get(mtglib.DoppelGangerEach),
267
 		DoppelGangerEach:    conf.Defense.Doppelganger.UpdateEach.Get(mtglib.DoppelGangerEach),
268
+		DoppelGangerDRS:     conf.Defense.Doppelganger.DRS.Get(false),
268
 	}
269
 	}
269
 
270
 
270
 	proxy, err := mtglib.NewProxy(opts)
271
 	proxy, err := mtglib.NewProxy(opts)

+ 1
- 0
internal/config/config.go Ver fichero

53
 			URLs       []TypeHttpsURL  `json:"urls"`
53
 			URLs       []TypeHttpsURL  `json:"urls"`
54
 			Repeats    TypeConcurrency `json:"repeats_per_raid"`
54
 			Repeats    TypeConcurrency `json:"repeats_per_raid"`
55
 			UpdateEach TypeDuration    `json:"raid_each"`
55
 			UpdateEach TypeDuration    `json:"raid_each"`
56
+			DRS        TypeBool        `json:"drs"`
56
 		} `json:"doppelganger"`
57
 		} `json:"doppelganger"`
57
 	} `json:"defense"`
58
 	} `json:"defense"`
58
 	Network struct {
59
 	Network struct {

+ 1
- 0
internal/config/parse.go Ver fichero

48
 			URLs       []string `toml:"urls" json:"urls,omitempty"`
48
 			URLs       []string `toml:"urls" json:"urls,omitempty"`
49
 			Repeats    uint     `toml:"repeats-per-raid" json:"repeats_per_raid,omitempty"`
49
 			Repeats    uint     `toml:"repeats-per-raid" json:"repeats_per_raid,omitempty"`
50
 			UpdateEach string   `toml:"raid-each" json:"raid_each,omitempty"`
50
 			UpdateEach string   `toml:"raid-each" json:"raid_each,omitempty"`
51
+			DRS        bool     `toml:"drs" json:"drs,omitempty"`
51
 		} `toml:"doppelganger" json:"doppelganger,omitempty"`
52
 		} `toml:"doppelganger" json:"doppelganger,omitempty"`
52
 	} `toml:"defense" json:"defense,omitempty"`
53
 	} `toml:"defense" json:"defense,omitempty"`
53
 	Network struct {
54
 	Network struct {

+ 6
- 1
mtglib/internal/doppel/ganger.go Ver fichero

29
 	scoutRaidEach    time.Duration
29
 	scoutRaidEach    time.Duration
30
 	scoutRaidRepeats int
30
 	scoutRaidRepeats int
31
 
31
 
32
+	drs bool
33
+
32
 	stats     *Stats
34
 	stats     *Stats
33
 	durations []time.Duration
35
 	durations []time.Duration
34
 
36
 
107
 			g.wg.Go(func() {
109
 			g.wg.Go(func() {
108
 				select {
110
 				select {
109
 				case <-g.ctx.Done():
111
 				case <-g.ctx.Done():
110
-				case updatedStatsChan <- NewStats(durations):
112
+				case updatedStatsChan <- NewStats(durations, g.drs):
111
 				}
113
 				}
112
 			})
114
 			})
113
 		case stats := <-updatedStatsChan:
115
 		case stats := <-updatedStatsChan:
152
 	scoutEach time.Duration,
154
 	scoutEach time.Duration,
153
 	scoutRepeats int,
155
 	scoutRepeats int,
154
 	urls []string,
156
 	urls []string,
157
+	drs bool,
155
 ) *Ganger {
158
 ) *Ganger {
156
 	ctx, cancel := context.WithCancel(ctx)
159
 	ctx, cancel := context.WithCancel(ctx)
157
 
160
 
169
 		logger:           logger,
172
 		logger:           logger,
170
 		scoutRaidEach:    scoutEach,
173
 		scoutRaidEach:    scoutEach,
171
 		scoutRaidRepeats: scoutRepeats,
174
 		scoutRaidRepeats: scoutRepeats,
175
+		drs:              drs,
172
 		stats: &Stats{
176
 		stats: &Stats{
173
 			k:      StatsDefaultK,
177
 			k:      StatsDefaultK,
174
 			lambda: StatsDefaultLambda,
178
 			lambda: StatsDefaultLambda,
179
+			drs:    drs,
175
 		},
180
 		},
176
 		scout:        NewScout(network, urls),
181
 		scout:        NewScout(network, urls),
177
 		connRequests: make(chan gangerConnRequest),
182
 		connRequests: make(chan gangerConnRequest),

+ 1
- 1
mtglib/internal/doppel/ganger_test.go Ver fichero

29
 		On("WarningError", mock.AnythingOfType("string"), mock.Anything).
29
 		On("WarningError", mock.AnythingOfType("string"), mock.Anything).
30
 		Maybe()
30
 		Maybe()
31
 
31
 
32
-	suite.g = NewGanger(suite.ctx, suite.network, suite.log, time.Hour, 1, suite.urls)
32
+	suite.g = NewGanger(suite.ctx, suite.network, suite.log, time.Hour, 1, suite.urls, true)
33
 	suite.g.Run()
33
 	suite.g.Run()
34
 }
34
 }
35
 
35
 

+ 2
- 2
mtglib/internal/doppel/init.go Ver fichero

14
 	// Please see Stats description
14
 	// Please see Stats description
15
 	// https://blog.cloudflare.com/optimizing-tls-over-tcp-to-reduce-latency/
15
 	// https://blog.cloudflare.com/optimizing-tls-over-tcp-to-reduce-latency/
16
 	// https://github.com/cloudflare/sslconfig/blob/master/patches/nginx__dynamic_tls_records.patch
16
 	// https://github.com/cloudflare/sslconfig/blob/master/patches/nginx__dynamic_tls_records.patch
17
-	TLSRecordSizeStart = 1369
18
-	TLSRecordSizeAccel = 4229
17
+	TLSRecordSizeStart = 1450
18
+	TLSRecordSizeAccel = 4096
19
 	TLSRecordSizeMax   = 16384 - tls.SizeHeader
19
 	TLSRecordSizeMax   = 16384 - tls.SizeHeader
20
 
20
 
21
 	TLSCounterAccelAfter = 40
21
 	TLSCounterAccelAfter = 40

+ 14
- 3
mtglib/internal/doppel/stats.go Ver fichero

17
 	// these values are taken from ok.ru. measured from moscow site.
17
 	// these values are taken from ok.ru. measured from moscow site.
18
 	StatsDefaultK      = 0.37846373895785335
18
 	StatsDefaultK      = 0.37846373895785335
19
 	StatsDefaultLambda = 1.73177086015485
19
 	StatsDefaultLambda = 1.73177086015485
20
+
21
+	// how many bytes should we drift
22
+	DRSNoise = 100
20
 )
23
 )
21
 
24
 
22
 // Stats is responsible for generating values that are distributed according
25
 // Stats is responsible for generating values that are distributed according
66
 	k float64
69
 	k float64
67
 	// https://en.wikipedia.org/wiki/Scale_parameter
70
 	// https://en.wikipedia.org/wiki/Scale_parameter
68
 	lambda float64
71
 	lambda float64
72
+
73
+	// Dynamic Record Sizing
74
+	drs bool
69
 }
75
 }
70
 
76
 
71
 func (d *Stats) Delay() time.Duration {
77
 func (d *Stats) Delay() time.Duration {
84
 		d.sizeCounter = 0
90
 		d.sizeCounter = 0
85
 	}
91
 	}
86
 
92
 
93
+	if !d.drs {
94
+		return TLSRecordSizeMax
95
+	}
96
+
87
 	d.sizeLastRequested = time.Now()
97
 	d.sizeLastRequested = time.Now()
88
 	d.sizeCounter++
98
 	d.sizeCounter++
89
 
99
 
90
 	switch {
100
 	switch {
91
 	case d.sizeCounter <= TLSCounterAccelAfter:
101
 	case d.sizeCounter <= TLSCounterAccelAfter:
92
-		return TLSRecordSizeStart
102
+		return TLSRecordSizeStart - rand.IntN(DRSNoise)
93
 	case d.sizeCounter <= TLSCounterMaxAfter:
103
 	case d.sizeCounter <= TLSCounterMaxAfter:
94
-		return TLSRecordSizeAccel
104
+		return TLSRecordSizeAccel - rand.IntN(DRSNoise)
95
 	}
105
 	}
96
 
106
 
97
 	return TLSRecordSizeMax
107
 	return TLSRecordSizeMax
98
 }
108
 }
99
 
109
 
100
-func NewStats(durations []time.Duration) *Stats {
110
+func NewStats(durations []time.Duration, drs bool) *Stats {
101
 	n := float64(len(durations))
111
 	n := float64(len(durations))
102
 
112
 
103
 	// in milliseconds
113
 	// in milliseconds
150
 	return &Stats{
160
 	return &Stats{
151
 		k:      k,
161
 		k:      k,
152
 		lambda: lambda,
162
 		lambda: lambda,
163
+		drs:    drs,
153
 	}
164
 	}
154
 }
165
 }

+ 37
- 12
mtglib/internal/doppel/stats_test.go Ver fichero

38
 	knownLambda := 100.0
38
 	knownLambda := 100.0
39
 
39
 
40
 	samples := suite.GenWeibull(knownK, knownLambda, 5000, 42)
40
 	samples := suite.GenWeibull(knownK, knownLambda, 5000, 42)
41
-	stats := NewStats(samples)
41
+	stats := NewStats(samples, true)
42
 
42
 
43
 	suite.InDelta(knownK, stats.k, 0.1)
43
 	suite.InDelta(knownK, stats.k, 0.1)
44
 	suite.InDelta(knownLambda, stats.lambda, 5.0)
44
 	suite.InDelta(knownLambda, stats.lambda, 5.0)
50
 	knownLambda := 50.0
50
 	knownLambda := 50.0
51
 
51
 
52
 	samples := suite.GenWeibull(knownK, knownLambda, 5000, 123)
52
 	samples := suite.GenWeibull(knownK, knownLambda, 5000, 123)
53
-	stats := NewStats(samples)
53
+	stats := NewStats(samples, true)
54
 
54
 
55
 	suite.InDelta(knownK, stats.k, 0.1)
55
 	suite.InDelta(knownK, stats.k, 0.1)
56
 	suite.InDelta(knownLambda, stats.lambda, 5.0)
56
 	suite.InDelta(knownLambda, stats.lambda, 5.0)
64
 	knownLambda := 100.0
64
 	knownLambda := 100.0
65
 
65
 
66
 	samples := suite.GenWeibull(knownK, knownLambda, 10000, 99)
66
 	samples := suite.GenWeibull(knownK, knownLambda, 10000, 99)
67
-	stats := NewStats(samples)
67
+	stats := NewStats(samples, true)
68
 
68
 
69
 	suite.InDelta(knownK, stats.k, 0.05)
69
 	suite.InDelta(knownK, stats.k, 0.05)
70
 	suite.InDelta(knownLambda, stats.lambda, 5.0)
70
 	suite.InDelta(knownLambda, stats.lambda, 5.0)
76
 	knownLambda := 200.0
76
 	knownLambda := 200.0
77
 
77
 
78
 	samples := suite.GenWeibull(knownK, knownLambda, 5000, 77)
78
 	samples := suite.GenWeibull(knownK, knownLambda, 5000, 77)
79
-	stats := NewStats(samples)
79
+	stats := NewStats(samples, true)
80
 
80
 
81
 	suite.InDelta(knownK, stats.k, 0.3)
81
 	suite.InDelta(knownK, stats.k, 0.3)
82
 	suite.InDelta(knownLambda, stats.lambda, 5.0)
82
 	suite.InDelta(knownLambda, stats.lambda, 5.0)
121
 	knownLambda := 80.0
121
 	knownLambda := 80.0
122
 
122
 
123
 	samples := suite.GenWeibull(knownK, knownLambda, 5000, 555)
123
 	samples := suite.GenWeibull(knownK, knownLambda, 5000, 555)
124
-	stats := NewStats(samples)
124
+	stats := NewStats(samples, true)
125
 
125
 
126
 	n := 50000
126
 	n := 50000
127
 	sum := 0.0
127
 	sum := 0.0
138
 }
138
 }
139
 
139
 
140
 func (suite *StatsTestSuite) TestSizeStartPhase() {
140
 func (suite *StatsTestSuite) TestSizeStartPhase() {
141
-	stats := &Stats{k: 1.0, lambda: 1.0}
141
+	stats := &Stats{k: 1.0, lambda: 1.0, drs: true}
142
 
142
 
143
 	for range TLSCounterAccelAfter {
143
 	for range TLSCounterAccelAfter {
144
 		size := stats.Size()
144
 		size := stats.Size()
145
-		suite.Equal(TLSRecordSizeStart, size)
145
+		suite.GreaterOrEqual(size, TLSRecordSizeStart-DRSNoise)
146
+		suite.LessOrEqual(size, TLSRecordSizeStart)
146
 	}
147
 	}
147
 }
148
 }
148
 
149
 
149
 func (suite *StatsTestSuite) TestSizeAccelPhase() {
150
 func (suite *StatsTestSuite) TestSizeAccelPhase() {
150
-	stats := &Stats{k: 1.0, lambda: 1.0}
151
+	stats := &Stats{k: 1.0, lambda: 1.0, drs: true}
151
 
152
 
152
 	for range TLSCounterAccelAfter {
153
 	for range TLSCounterAccelAfter {
153
 		stats.Size()
154
 		stats.Size()
155
 
156
 
156
 	for range TLSCounterMaxAfter - TLSCounterAccelAfter {
157
 	for range TLSCounterMaxAfter - TLSCounterAccelAfter {
157
 		size := stats.Size()
158
 		size := stats.Size()
158
-		suite.Equal(TLSRecordSizeAccel, size)
159
+		suite.GreaterOrEqual(size, TLSRecordSizeAccel-DRSNoise)
160
+		suite.LessOrEqual(size, TLSRecordSizeAccel)
159
 	}
161
 	}
160
 }
162
 }
161
 
163
 
162
 func (suite *StatsTestSuite) TestSizeMaxPhase() {
164
 func (suite *StatsTestSuite) TestSizeMaxPhase() {
163
-	stats := &Stats{k: 1.0, lambda: 1.0}
165
+	stats := &Stats{k: 1.0, lambda: 1.0, drs: true}
164
 
166
 
165
 	for range TLSCounterMaxAfter {
167
 	for range TLSCounterMaxAfter {
166
 		stats.Size()
168
 		stats.Size()
173
 }
175
 }
174
 
176
 
175
 func (suite *StatsTestSuite) TestSizeResetsAfterInactivity() {
177
 func (suite *StatsTestSuite) TestSizeResetsAfterInactivity() {
176
-	stats := &Stats{k: 1.0, lambda: 1.0}
178
+	stats := &Stats{k: 1.0, lambda: 1.0, drs: true}
177
 
179
 
178
 	// Advance past start phase.
180
 	// Advance past start phase.
179
 	for range TLSCounterMaxAfter {
181
 	for range TLSCounterMaxAfter {
185
 	// Simulate inactivity by backdating sizeLastRequested.
187
 	// Simulate inactivity by backdating sizeLastRequested.
186
 	stats.sizeLastRequested = time.Now().Add(-TLSRecordSizeResetAfter - time.Millisecond)
188
 	stats.sizeLastRequested = time.Now().Add(-TLSRecordSizeResetAfter - time.Millisecond)
187
 
189
 
188
-	suite.Equal(TLSRecordSizeStart, stats.Size())
190
+	size := stats.Size()
191
+	suite.GreaterOrEqual(size, TLSRecordSizeStart-DRSNoise)
192
+	suite.LessOrEqual(size, TLSRecordSizeStart)
193
+}
194
+
195
+func (suite *StatsTestSuite) TestSizeNoDRSAlwaysMax() {
196
+	stats := &Stats{k: 1.0, lambda: 1.0, drs: false}
197
+
198
+	for range TLSCounterMaxAfter + 20 {
199
+		suite.Equal(TLSRecordSizeMax, stats.Size())
200
+	}
201
+}
202
+
203
+func (suite *StatsTestSuite) TestSizeNoDRSIgnoresCounter() {
204
+	stats := &Stats{k: 1.0, lambda: 1.0, drs: false}
205
+
206
+	// Even after many calls, always returns max.
207
+	for range 200 {
208
+		suite.Equal(TLSRecordSizeMax, stats.Size())
209
+	}
210
+
211
+	// Inactivity has no effect either.
212
+	stats.sizeLastRequested = time.Now().Add(-TLSRecordSizeResetAfter - time.Millisecond)
213
+	suite.Equal(TLSRecordSizeMax, stats.Size())
189
 }
214
 }
190
 
215
 
191
 func TestStats(t *testing.T) {
216
 func TestStats(t *testing.T) {

+ 1
- 0
mtglib/proxy.go Ver fichero

345
 			opts.DoppelGangerEach,
345
 			opts.DoppelGangerEach,
346
 			int(opts.DoppelGangerPerRaid),
346
 			int(opts.DoppelGangerPerRaid),
347
 			opts.DoppelGangerURLs,
347
 			opts.DoppelGangerURLs,
348
+			opts.DoppelGangerDRS,
348
 		),
349
 		),
349
 		configUpdater: dc.NewPublicConfigUpdater(
350
 		configUpdater: dc.NewPublicConfigUpdater(
350
 			tg,
351
 			tg,

+ 3
- 0
mtglib/proxy_opts.go Ver fichero

157
 	// DoppelGangerEach defines a time period between each raid. We recommend
157
 	// DoppelGangerEach defines a time period between each raid. We recommend
158
 	// to use hours here.
158
 	// to use hours here.
159
 	DoppelGangerEach time.Duration
159
 	DoppelGangerEach time.Duration
160
+
161
+	// DoppelGangerDRS defines if TLS Dynamic Record Sizing is active.
162
+	DoppelGangerDRS bool
160
 }
163
 }
161
 
164
 
162
 func (p ProxyOpts) valid() error {
165
 func (p ProxyOpts) valid() error {

Loading…
Cancelar
Guardar