// SPDX-License-Identifier: GPL-2.0
/*
 * To speed up listener socket lookup, create an array to store all sockets
 * listening on the same port.  This allows a decision to be made after finding
 * the first socket.  An optional BPF program can also be configured for
 * selecting the socket index from the array of available sockets.
 */
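
/*
 * Illustrative userspace sketch (not part of this file): a process joins
 * a reuseport group by enabling SO_REUSEPORT before bind(); the addr
 * setup is assumed to happen elsewhere.
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	int one = 1;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(fd, SOMAXCONN);
 *
 * Every socket bound this way to the same address/port lands in the
 * socks[] array managed below, and incoming packets are spread across
 * the group by hash or by the optional BPF program.
 */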

#include <net/ip.h>
#include <net/sock_reuseport.h>
#include <linux/bpf.h>
#include <linux/idr.h>
#include <linux/filter.h>
#include <linux/rcupdate.h>

#define INIT_SOCKS 128

DEFINE_SPINLOCK(reuseport_lock);

static DEFINE_IDA(reuseport_ida);
static int reuseport_resurrect(struct sock *sk, struct sock_reuseport *old_reuse,
			       struct sock_reuseport *reuse, bool bind_inany);

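/* The socks[] array keeps listening sockets packed at the front
 * ([0, num_socks)) and closed/shutdown()ed sockets packed at the back
 * ([max_socks - num_closed_socks, max_socks)).  Search the requested
 * section for sk and return its index, or -1 if it is not there.
 */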
static int reuseport_sock_index(struct sock *sk,
				const struct sock_reuseport *reuse,
				bool closed)
{
	int left, right;

	if (!closed) {
		left = 0;
		right = reuse->num_socks;
	} else {
		left = reuse->max_socks - reuse->num_closed_socks;
		right = reuse->max_socks;
	}

	for (; left < right; left++)
		if (reuse->socks[left] == sk)
			return left;
	return -1;
}

static void __reuseport_add_sock(struct sock *sk,
				 struct sock_reuseport *reuse)
{
	reuse->socks[reuse->num_socks] = sk;
	/* paired with smp_rmb() in reuseport_(select|migrate)_sock() */
	smp_wmb();
	reuse->num_socks++;
}

static bool __reuseport_detach_sock(struct sock *sk,
				    struct sock_reuseport *reuse)
{
	int i = reuseport_sock_index(sk, reuse, false);

	if (i == -1)
		return false;

	reuse->socks[i] = reuse->socks[reuse->num_socks - 1];
	reuse->num_socks--;

	return true;
}

static void __reuseport_add_closed_sock(struct sock *sk,
					struct sock_reuseport *reuse)
{
	reuse->socks[reuse->max_socks - reuse->num_closed_socks - 1] = sk;
	/* paired with READ_ONCE() in inet_csk_bind_conflict() */
	WRITE_ONCE(reuse->num_closed_socks, reuse->num_closed_socks + 1);
}

static bool __reuseport_detach_closed_sock(struct sock *sk,
					   struct sock_reuseport *reuse)
{
	int i = reuseport_sock_index(sk, reuse, true);

	if (i == -1)
		return false;

	reuse->socks[i] = reuse->socks[reuse->max_socks - reuse->num_closed_socks];
	/* paired with READ_ONCE() in inet_csk_bind_conflict() */
	WRITE_ONCE(reuse->num_closed_socks, reuse->num_closed_socks - 1);

	return true;
}

static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks)
{
	unsigned int size = sizeof(struct sock_reuseport) +
		      sizeof(struct sock *) * max_socks;
	struct sock_reuseport *reuse = kzalloc(size, GFP_ATOMIC);

	if (!reuse)
		return NULL;

	reuse->max_socks = max_socks;

	RCU_INIT_POINTER(reuse->prog, NULL);
	return reuse;
}

int reuseport_alloc(struct sock *sk, bool bind_inany)
{
	struct sock_reuseport *reuse;
	int id, ret = 0;

	/* bh lock used since this function call may precede hlist lock in
	 * soft irq of receive path or setsockopt from process context
	 */
	spin_lock_bh(&reuseport_lock);

	/* Allocation attempts can occur concurrently via the setsockopt path
	 * and the bind/hash path.  Nothing to do when we lose the race.
	 */
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	if (reuse) {
		if (reuse->num_closed_socks) {
			/* sk was shutdown()ed before */
			ret = reuseport_resurrect(sk, reuse, NULL, bind_inany);
			goto out;
		}

		/* Only set reuse->bind_inany if the bind_inany is true.
		 * Otherwise, it will overwrite the reuse->bind_inany
		 * which was set by the bind/hash path.
		 */
		if (bind_inany)
			reuse->bind_inany = bind_inany;
		goto out;
	}

	reuse = __reuseport_alloc(INIT_SOCKS);
	if (!reuse) {
		ret = -ENOMEM;
		goto out;
	}

	id = ida_alloc(&reuseport_ida, GFP_ATOMIC);
	if (id < 0) {
		kfree(reuse);
		ret = id;
		goto out;
	}

	reuse->reuseport_id = id;
	reuse->bind_inany = bind_inany;
	reuse->socks[0] = sk;
	reuse->num_socks = 1;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

out:
	spin_unlock_bh(&reuseport_lock);

	return ret;
}
EXPORT_SYMBOL(reuseport_alloc);

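/* Double the group's capacity so another socket can be added.  If the
 * capacity would exceed U16_MAX, try to make room by dropping one
 * closed socket instead; otherwise copy both the listening and the
 * closed sections into a bigger array and repoint every member's
 * sk_reuseport_cb at it.
 */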
static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
{
	struct sock_reuseport *more_reuse;
	u32 more_socks_size, i;

	more_socks_size = reuse->max_socks * 2U;
	if (more_socks_size > U16_MAX) {
		if (reuse->num_closed_socks) {
			/* Make room by removing a closed sk.
			 * The child has already been migrated.
			 * Only reqsk left at this point.
			 */
			struct sock *sk;

			sk = reuse->socks[reuse->max_socks - reuse->num_closed_socks];
			RCU_INIT_POINTER(sk->sk_reuseport_cb, NULL);
			__reuseport_detach_closed_sock(sk, reuse);

			return reuse;
		}

		return NULL;
	}

	more_reuse = __reuseport_alloc(more_socks_size);
	if (!more_reuse)
		return NULL;

	more_reuse->num_socks = reuse->num_socks;
	more_reuse->num_closed_socks = reuse->num_closed_socks;
	more_reuse->prog = reuse->prog;
	more_reuse->reuseport_id = reuse->reuseport_id;
	more_reuse->bind_inany = reuse->bind_inany;
	more_reuse->has_conns = reuse->has_conns;

	memcpy(more_reuse->socks, reuse->socks,
	       reuse->num_socks * sizeof(struct sock *));
	memcpy(more_reuse->socks +
	       (more_reuse->max_socks - more_reuse->num_closed_socks),
	       reuse->socks + (reuse->max_socks - reuse->num_closed_socks),
	       reuse->num_closed_socks * sizeof(struct sock *));
	more_reuse->synq_overflow_ts = READ_ONCE(reuse->synq_overflow_ts);

	for (i = 0; i < reuse->max_socks; ++i)
		rcu_assign_pointer(reuse->socks[i]->sk_reuseport_cb,
				   more_reuse);

	/* Note: we use kfree_rcu here instead of reuseport_free_rcu so
	 * that reuse and more_reuse can temporarily share a reference
	 * to prog.
	 */
	kfree_rcu(reuse, rcu);
	return more_reuse;
}

static void reuseport_free_rcu(struct rcu_head *head)
{
	struct sock_reuseport *reuse;

	reuse = container_of(head, struct sock_reuseport, rcu);
	sk_reuseport_prog_free(rcu_dereference_protected(reuse->prog, 1));
	ida_free(&reuseport_ida, reuse->reuseport_id);
	kfree(reuse);
}

/**
 *  reuseport_add_sock - Add a socket to the reuseport group of another.
 *  @sk:  New socket to add to the group.
 *  @sk2: Socket belonging to the existing reuseport group.
 *  @bind_inany: Whether or not the group is bound to a local INANY address.
 *
 *  May return ENOMEM and not add socket to group under memory pressure.
 */
int reuseport_add_sock(struct sock *sk, struct sock *sk2, bool bind_inany)
{
	struct sock_reuseport *old_reuse, *reuse;

	if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
		int err = reuseport_alloc(sk2, bind_inany);

		if (err)
			return err;
	}

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					      lockdep_is_held(&reuseport_lock));
	if (old_reuse && old_reuse->num_closed_socks) {
		/* sk was shutdown()ed before */
		int err = reuseport_resurrect(sk, old_reuse, reuse, reuse->bind_inany);

		spin_unlock_bh(&reuseport_lock);
		return err;
	}

	if (old_reuse && old_reuse->num_socks != 1) {
		spin_unlock_bh(&reuseport_lock);
		return -EBUSY;
	}

	if (reuse->num_socks + reuse->num_closed_socks == reuse->max_socks) {
		reuse = reuseport_grow(reuse);
		if (!reuse) {
			spin_unlock_bh(&reuseport_lock);
			return -ENOMEM;
		}
	}

	__reuseport_add_sock(sk, reuse);
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

	spin_unlock_bh(&reuseport_lock);

	if (old_reuse)
		call_rcu(&old_reuse->rcu, reuseport_free_rcu);
	return 0;
}
EXPORT_SYMBOL(reuseport_add_sock);

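/* Re-activate a shutdown()ed socket that is parked in old_reuse's
 * closed section.  Move it into the listening section of reuse (its own
 * group, another existing group, or a freshly allocated one when reuse
 * is NULL) and free old_reuse once it becomes empty.
 */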
static int reuseport_resurrect(struct sock *sk, struct sock_reuseport *old_reuse,
			       struct sock_reuseport *reuse, bool bind_inany)
{
	if (old_reuse == reuse) {
		/* If sk was in the same reuseport group, just pop sk out of
		 * the closed section and push sk into the listening section.
		 */
		__reuseport_detach_closed_sock(sk, old_reuse);
		__reuseport_add_sock(sk, old_reuse);
		return 0;
	}

	if (!reuse) {
		/* In bind()/listen() path, we cannot carry over the eBPF prog
		 * for the shutdown()ed socket. In setsockopt() path, we should
		 * not change the eBPF prog of listening sockets by attaching a
		 * prog to the shutdown()ed socket. Thus, we will allocate a new
		 * reuseport group and detach sk from the old group.
		 */
		int id;

		reuse = __reuseport_alloc(INIT_SOCKS);
		if (!reuse)
			return -ENOMEM;

		id = ida_alloc(&reuseport_ida, GFP_ATOMIC);
		if (id < 0) {
			kfree(reuse);
			return id;
		}

		reuse->reuseport_id = id;
		reuse->bind_inany = bind_inany;
	} else {
		/* Move sk from the old group to the new one if
		 * - all the other listeners in the old group were close()d or
		 *   shutdown()ed, and then sk2 has listen()ed on the same port
		 * OR
		 * - sk listen()ed without bind() (or with autobind), was
		 *   shutdown()ed, and then listen()s on another port which
		 *   sk2 listen()s on.
		 */
		if (reuse->num_socks + reuse->num_closed_socks == reuse->max_socks) {
			reuse = reuseport_grow(reuse);
			if (!reuse)
				return -ENOMEM;
		}
	}

	__reuseport_detach_closed_sock(sk, old_reuse);
	__reuseport_add_sock(sk, reuse);
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

	if (old_reuse->num_socks + old_reuse->num_closed_socks == 0)
		call_rcu(&old_reuse->rcu, reuseport_free_rcu);

	return 0;
}

void reuseport_detach_sock(struct sock *sk)
{
	struct sock_reuseport *reuse;

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));

	/* reuseport_grow() has detached a closed sk */
	if (!reuse)
		goto out;

	/* Notify the bpf side. The sk may be added to a sockarray
	 * map. If so, sockarray logic will remove it from the map.
	 *
	 * Other bpf map types that work with reuseport, like sockmap,
	 * don't need an explicit callback from here. They override sk
	 * unhash/close ops to remove the sk from the map before we
	 * get to this point.
	 */
	bpf_sk_reuseport_detach(sk);

	rcu_assign_pointer(sk->sk_reuseport_cb, NULL);

	if (!__reuseport_detach_closed_sock(sk, reuse))
		__reuseport_detach_sock(sk, reuse);

	if (reuse->num_socks + reuse->num_closed_socks == 0)
		call_rcu(&reuse->rcu, reuseport_free_rcu);

out:
	spin_unlock_bh(&reuseport_lock);
}
EXPORT_SYMBOL(reuseport_detach_sock);

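/* A listening TCP socket is being close()d or shutdown()ed.  If request
 * migration is possible (sysctl_tcp_migrate_req or a SELECT_OR_MIGRATE
 * BPF prog), park sk in the closed section so its pending children can
 * still be migrated; otherwise detach it from the group immediately.
 */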
void reuseport_stop_listen_sock(struct sock *sk)
{
	if (sk->sk_protocol == IPPROTO_TCP) {
		struct sock_reuseport *reuse;
		struct bpf_prog *prog;

		spin_lock_bh(&reuseport_lock);

		reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
						  lockdep_is_held(&reuseport_lock));
		prog = rcu_dereference_protected(reuse->prog,
						 lockdep_is_held(&reuseport_lock));

		if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_migrate_req) ||
		    (prog && prog->expected_attach_type == BPF_SK_REUSEPORT_SELECT_OR_MIGRATE)) {
			/* Migration capable, move sk from the listening section
			 * to the closed section.
			 */
			bpf_sk_reuseport_detach(sk);

			__reuseport_detach_sock(sk, reuse);
			__reuseport_add_closed_sock(sk, reuse);

			spin_unlock_bh(&reuseport_lock);
			return;
		}

		spin_unlock_bh(&reuseport_lock);
	}

	/* Not capable of migration; detach immediately */
	reuseport_detach_sock(sk);
}
EXPORT_SYMBOL(reuseport_stop_listen_sock);

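/* Run the attached BPF program (non-SK_REUSEPORT type) against skb with
 * its data pointer temporarily advanced past the protocol header, and
 * use the returned value as an index into the group.  Returns NULL when
 * the skb cannot be prepared or the index is out of range.
 */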
static struct sock *run_bpf_filter(struct sock_reuseport *reuse, u16 socks,
				   struct bpf_prog *prog, struct sk_buff *skb,
				   int hdr_len)
{
	struct sk_buff *nskb = NULL;
	u32 index;

	if (skb_shared(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return NULL;
		skb = nskb;
	}

	/* temporarily advance data past protocol header */
	if (!pskb_pull(skb, hdr_len)) {
		kfree_skb(nskb);
		return NULL;
	}
	index = bpf_prog_run_save_cb(prog, skb);
	__skb_push(skb, hdr_len);

	consume_skb(nskb);

	if (index >= socks)
		return NULL;

	return reuse->socks[index];
}

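/* Pick a socket by scaling hash into [0, num_socks) and probing
 * linearly for one that is not ESTABLISHED.  Returns NULL if every
 * socket in the group is already connected.
 */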
static struct sock *reuseport_select_sock_by_hash(struct sock_reuseport *reuse,
						  u32 hash, u16 num_socks)
{
	int i, j;

	i = j = reciprocal_scale(hash, num_socks);
	while (reuse->socks[i]->sk_state == TCP_ESTABLISHED) {
		i++;
		if (i >= num_socks)
			i = 0;
		if (i == j)
			return NULL;
	}

	return reuse->socks[i];
}

/**
 *  reuseport_select_sock - Select a socket from an SO_REUSEPORT group.
 *  @sk: First socket in the group.
 *  @hash: When no BPF filter is available, use this hash to select.
 *  @skb: skb to run through BPF filter.
 *  @hdr_len: BPF filter expects skb data pointer at payload data.  If
 *    the skb does not yet point at the payload, this parameter represents
 *    how far the pointer needs to advance to reach the payload.
 *  Returns a socket that should receive the packet (or NULL on error).
 */
struct sock *reuseport_select_sock(struct sock *sk,
				   u32 hash,
				   struct sk_buff *skb,
				   int hdr_len)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *prog;
	struct sock *sk2 = NULL;
	u16 socks;

	rcu_read_lock();
	reuse = rcu_dereference(sk->sk_reuseport_cb);

	/* if memory allocation failed or add call is not yet complete */
	if (!reuse)
		goto out;

	prog = rcu_dereference(reuse->prog);
	socks = READ_ONCE(reuse->num_socks);
	if (likely(socks)) {
		/* paired with smp_wmb() in __reuseport_add_sock() */
		smp_rmb();

		if (!prog || !skb)
			goto select_by_hash;

		if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
			sk2 = bpf_run_sk_reuseport(reuse, sk, prog, skb, NULL, hash);
		else
			sk2 = run_bpf_filter(reuse, socks, prog, skb, hdr_len);

select_by_hash:
		/* no bpf or invalid bpf result: fall back to hash usage */
		if (!sk2)
			sk2 = reuseport_select_sock_by_hash(reuse, hash, socks);
	}

out:
	rcu_read_unlock();
	return sk2;
}
EXPORT_SYMBOL(reuseport_select_sock);

/**
 *  reuseport_migrate_sock - Select a socket from an SO_REUSEPORT group.
 *  @sk: close()ed or shutdown()ed socket in the group.
 *  @migrating_sk: ESTABLISHED/SYN_RECV full socket in the accept queue or
 *    NEW_SYN_RECV request socket during 3WHS.
 *  @skb: skb to run through BPF filter.
 *  Returns a socket (with sk_refcnt +1) that should accept the child socket
 *  (or NULL on error).
 */
struct sock *reuseport_migrate_sock(struct sock *sk,
				    struct sock *migrating_sk,
				    struct sk_buff *skb)
{
	struct sock_reuseport *reuse;
	struct sock *nsk = NULL;
	bool allocated = false;
	struct bpf_prog *prog;
	u16 socks;
	u32 hash;

	rcu_read_lock();

	reuse = rcu_dereference(sk->sk_reuseport_cb);
	if (!reuse)
		goto out;

	socks = READ_ONCE(reuse->num_socks);
	if (unlikely(!socks))
		goto failure;

	/* paired with smp_wmb() in __reuseport_add_sock() */
	smp_rmb();

	hash = migrating_sk->sk_hash;
	prog = rcu_dereference(reuse->prog);
	if (!prog || prog->expected_attach_type != BPF_SK_REUSEPORT_SELECT_OR_MIGRATE) {
		if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_migrate_req))
			goto select_by_hash;
		goto failure;
	}

	if (!skb) {
		skb = alloc_skb(0, GFP_ATOMIC);
		if (!skb)
			goto failure;
		allocated = true;
	}

	nsk = bpf_run_sk_reuseport(reuse, sk, prog, skb, migrating_sk, hash);

	if (allocated)
		kfree_skb(skb);

select_by_hash:
	if (!nsk)
		nsk = reuseport_select_sock_by_hash(reuse, hash, socks);

	if (IS_ERR_OR_NULL(nsk) || unlikely(!refcount_inc_not_zero(&nsk->sk_refcnt))) {
		nsk = NULL;
		goto failure;
	}

out:
	rcu_read_unlock();
	return nsk;

failure:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
	goto out;
}
EXPORT_SYMBOL(reuseport_migrate_sock);

int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *old_prog;

	if (sk_unhashed(sk)) {
		int err;

		if (!sk->sk_reuseport)
			return -EINVAL;

		err = reuseport_alloc(sk, false);
		if (err)
			return err;
	} else if (!rcu_access_pointer(sk->sk_reuseport_cb)) {
		/* The socket wasn't bound with SO_REUSEPORT */
		return -EINVAL;
	}

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_prog = rcu_dereference_protected(reuse->prog,
					     lockdep_is_held(&reuseport_lock));
	rcu_assign_pointer(reuse->prog, prog);
	spin_unlock_bh(&reuseport_lock);

	sk_reuseport_prog_free(old_prog);
	return 0;
}
EXPORT_SYMBOL(reuseport_attach_prog);

int reuseport_detach_prog(struct sock *sk)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *old_prog;

	old_prog = NULL;
	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));

	/* reuse must be checked after acquiring the reuseport_lock
	 * because reuseport_grow() can detach a closed sk.
	 */
	if (!reuse) {
		spin_unlock_bh(&reuseport_lock);
		return sk->sk_reuseport ? -ENOENT : -EINVAL;
	}

	if (sk_unhashed(sk) && reuse->num_closed_socks) {
		spin_unlock_bh(&reuseport_lock);
		return -ENOENT;
	}

	old_prog = rcu_replace_pointer(reuse->prog, old_prog,
				       lockdep_is_held(&reuseport_lock));
	spin_unlock_bh(&reuseport_lock);

	if (!old_prog)
		return -ENOENT;

	sk_reuseport_prog_free(old_prog);
	return 0;
}
EXPORT_SYMBOL(reuseport_detach_prog);