Skip to content

Commit db58ba4

Browse files
ast authored and davem330 committed
bpf: wire in data and data_end for cls_act_bpf
allow cls_bpf and act_bpf programs access skb->data and skb->data_end pointers. The bpf helpers that change skb->data need to update data_end pointer as well. The verifier checks that programs always reload data, data_end pointers after calls to such bpf helpers. We cannot add 'data_end' pointer to struct qdisc_skb_cb directly, since it's embedded as-is by infiniband ipoib, so wrapper struct is needed. Signed-off-by: Alexei Starovoitov <ast@kernel.org> Acked-by: Daniel Borkmann <daniel@iogearbox.net> Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent 735b433 commit db58ba4

File tree

4 files changed

+65
-6
lines changed

4 files changed

+65
-6
lines changed

include/linux/filter.h

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -352,6 +352,22 @@ struct sk_filter {
352352

353353
#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN
354354

355+
struct bpf_skb_data_end {
356+
struct qdisc_skb_cb qdisc_cb;
357+
void *data_end;
358+
};
359+
360+
/* compute the linear packet data range [data, data_end) which
361+
* will be accessed by cls_bpf and act_bpf programs
362+
*/
363+
static inline void bpf_compute_data_end(struct sk_buff *skb)
364+
{
365+
struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
366+
367+
BUILD_BUG_ON(sizeof(*cb) > FIELD_SIZEOF(struct sk_buff, cb));
368+
cb->data_end = skb->data + skb_headlen(skb);
369+
}
370+
355371
static inline u8 *bpf_skb_cb(struct sk_buff *skb)
356372
{
357373
/* eBPF programs may read/write skb->cb[] area to transfer meta

net/core/filter.c

Lines changed: 45 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1344,6 +1344,21 @@ struct bpf_scratchpad {
13441344

13451345
static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);
13461346

1347+
static inline int bpf_try_make_writable(struct sk_buff *skb,
1348+
unsigned int write_len)
1349+
{
1350+
int err;
1351+
1352+
if (!skb_cloned(skb))
1353+
return 0;
1354+
if (skb_clone_writable(skb, write_len))
1355+
return 0;
1356+
err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1357+
if (!err)
1358+
bpf_compute_data_end(skb);
1359+
return err;
1360+
}
1361+
13471362
static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
13481363
{
13491364
struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
@@ -1366,7 +1381,7 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
13661381
*/
13671382
if (unlikely((u32) offset > 0xffff || len > sizeof(sp->buff)))
13681383
return -EFAULT;
1369-
if (unlikely(skb_try_make_writable(skb, offset + len)))
1384+
if (unlikely(bpf_try_make_writable(skb, offset + len)))
13701385
return -EFAULT;
13711386

13721387
ptr = skb_header_pointer(skb, offset, len, sp->buff);
@@ -1444,7 +1459,7 @@ static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
14441459
return -EINVAL;
14451460
if (unlikely((u32) offset > 0xffff))
14461461
return -EFAULT;
1447-
if (unlikely(skb_try_make_writable(skb, offset + sizeof(sum))))
1462+
if (unlikely(bpf_try_make_writable(skb, offset + sizeof(sum))))
14481463
return -EFAULT;
14491464

14501465
ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
@@ -1499,7 +1514,7 @@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
14991514
return -EINVAL;
15001515
if (unlikely((u32) offset > 0xffff))
15011516
return -EFAULT;
1502-
if (unlikely(skb_try_make_writable(skb, offset + sizeof(sum))))
1517+
if (unlikely(bpf_try_make_writable(skb, offset + sizeof(sum))))
15031518
return -EFAULT;
15041519

15051520
ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
@@ -1699,12 +1714,15 @@ static u64 bpf_skb_vlan_push(u64 r1, u64 r2, u64 vlan_tci, u64 r4, u64 r5)
16991714
{
17001715
struct sk_buff *skb = (struct sk_buff *) (long) r1;
17011716
__be16 vlan_proto = (__force __be16) r2;
1717+
int ret;
17021718

17031719
if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
17041720
vlan_proto != htons(ETH_P_8021AD)))
17051721
vlan_proto = htons(ETH_P_8021Q);
17061722

1707-
return skb_vlan_push(skb, vlan_proto, vlan_tci);
1723+
ret = skb_vlan_push(skb, vlan_proto, vlan_tci);
1724+
bpf_compute_data_end(skb);
1725+
return ret;
17081726
}
17091727

17101728
const struct bpf_func_proto bpf_skb_vlan_push_proto = {
@@ -1720,8 +1738,11 @@ EXPORT_SYMBOL_GPL(bpf_skb_vlan_push_proto);
17201738
static u64 bpf_skb_vlan_pop(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
17211739
{
17221740
struct sk_buff *skb = (struct sk_buff *) (long) r1;
1741+
int ret;
17231742

1724-
return skb_vlan_pop(skb);
1743+
ret = skb_vlan_pop(skb);
1744+
bpf_compute_data_end(skb);
1745+
return ret;
17251746
}
17261747

17271748
const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
@@ -2066,8 +2087,12 @@ static bool __is_valid_access(int off, int size, enum bpf_access_type type)
20662087
static bool sk_filter_is_valid_access(int off, int size,
20672088
enum bpf_access_type type)
20682089
{
2069-
if (off == offsetof(struct __sk_buff, tc_classid))
2090+
switch (off) {
2091+
case offsetof(struct __sk_buff, tc_classid):
2092+
case offsetof(struct __sk_buff, data):
2093+
case offsetof(struct __sk_buff, data_end):
20702094
return false;
2095+
}
20712096

20722097
if (type == BPF_WRITE) {
20732098
switch (off) {
@@ -2215,6 +2240,20 @@ static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
22152240
*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, ctx_off);
22162241
break;
22172242

2243+
case offsetof(struct __sk_buff, data):
2244+
*insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, data)),
2245+
dst_reg, src_reg,
2246+
offsetof(struct sk_buff, data));
2247+
break;
2248+
2249+
case offsetof(struct __sk_buff, data_end):
2250+
ctx_off -= offsetof(struct __sk_buff, data_end);
2251+
ctx_off += offsetof(struct sk_buff, cb);
2252+
ctx_off += offsetof(struct bpf_skb_data_end, data_end);
2253+
*insn++ = BPF_LDX_MEM(bytes_to_bpf_size(sizeof(void *)),
2254+
dst_reg, src_reg, ctx_off);
2255+
break;
2256+
22182257
case offsetof(struct __sk_buff, tc_index):
22192258
#ifdef CONFIG_NET_SCHED
22202259
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tc_index) != 2);

net/sched/act_bpf.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -53,9 +53,11 @@ static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
5353
filter = rcu_dereference(prog->filter);
5454
if (at_ingress) {
5555
__skb_push(skb, skb->mac_len);
56+
bpf_compute_data_end(skb);
5657
filter_res = BPF_PROG_RUN(filter, skb);
5758
__skb_pull(skb, skb->mac_len);
5859
} else {
60+
bpf_compute_data_end(skb);
5961
filter_res = BPF_PROG_RUN(filter, skb);
6062
}
6163
rcu_read_unlock();

net/sched/cls_bpf.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -96,9 +96,11 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
9696
if (at_ingress) {
9797
/* It is safe to push/pull even if skb_shared() */
9898
__skb_push(skb, skb->mac_len);
99+
bpf_compute_data_end(skb);
99100
filter_res = BPF_PROG_RUN(prog->filter, skb);
100101
__skb_pull(skb, skb->mac_len);
101102
} else {
103+
bpf_compute_data_end(skb);
102104
filter_res = BPF_PROG_RUN(prog->filter, skb);
103105
}
104106

0 commit comments

Comments
 (0)