diff options
| author | Anay Wadhera <awadhera@berkeley.edu> | 2021-05-20 21:56:43 -0700 |
|---|---|---|
| committer | Michael Bestas <mkbestas@lineageos.org> | 2022-04-19 00:49:44 +0300 |
| commit | fbd2037fcf5dd7b668cce94a91b20b2b4af19f93 (patch) | |
| tree | 3d346de805297a061d03793259dba58f505f49e3 /kernel | |
| parent | 980add4490a19cbf5b73938a2b52278bf3ba8266 (diff) | |
Revert "bpf: prevent out-of-bounds speculation"
This reverts commit 9a7fad4c0e215fb1c256fee27c45f9f8bc4364c5.
Signed-off-by: Chatur27 <jasonbright2709@gmail.com>
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/bpf/arraymap.c | 24 | ||||
| -rw-r--r-- | kernel/bpf/verifier.c | 46 |
2 files changed, 9 insertions, 61 deletions
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 56f8a8306a49..b0799bced518 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -20,9 +20,8 @@
 /* Called from syscall */
 static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 {
-	u32 elem_size, array_size, index_mask, max_entries;
-	bool unpriv = !capable(CAP_SYS_ADMIN);
 	struct bpf_array *array;
+	u32 elem_size, array_size;
 
 	/* check sanity of attributes */
 	if (attr->max_entries == 0 || attr->key_size != 4 ||
@@ -37,21 +36,12 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 
 	elem_size = round_up(attr->value_size, 8);
 
-	max_entries = attr->max_entries;
-	index_mask = roundup_pow_of_two(max_entries) - 1;
-
-	if (unpriv)
-		/* round up array size to nearest power of 2,
-		 * since cpu will speculate within index_mask limits
-		 */
-		max_entries = index_mask + 1;
-
 	/* check round_up into zero and u32 overflow */
 	if (elem_size == 0 ||
-	    max_entries > (U32_MAX - PAGE_SIZE - sizeof(*array)) / elem_size)
+	    attr->max_entries > (U32_MAX - PAGE_SIZE - sizeof(*array)) / elem_size)
 		return ERR_PTR(-ENOMEM);
 
-	array_size = sizeof(*array) + max_entries * elem_size;
+	array_size = sizeof(*array) + attr->max_entries * elem_size;
 
 	/* allocate all map elements and zero-initialize them */
 	array = kzalloc(array_size, GFP_USER | __GFP_NOWARN);
@@ -60,8 +50,6 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 		if (!array)
 			return ERR_PTR(-ENOMEM);
 	}
-	array->index_mask = index_mask;
-	array->map.unpriv_array = unpriv;
 
 	/* copy mandatory map attributes */
 	array->map.key_size = attr->key_size;
@@ -82,7 +70,7 @@ static void *array_map_lookup_elem(struct bpf_map *map, void *key)
 	if (index >= array->map.max_entries)
 		return NULL;
 
-	return array->value + array->elem_size * (index & array->index_mask);
+	return array->value + array->elem_size * index;
 }
 
 /* Called from syscall */
@@ -123,9 +111,7 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
 		/* all elements already exist */
 		return -EEXIST;
 
-	memcpy(array->value +
-	       array->elem_size * (index & array->index_mask),
-	       value, map->value_size);
+	memcpy(array->value + array->elem_size * index, value, map->value_size);
 	return 0;
 }
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 014c2d759916..bb4b5405d1a5 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -187,10 +187,7 @@ struct verifier_stack_elem {
 };
 
 struct bpf_insn_aux_data {
-	union {
-		enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
-		struct bpf_map *map_ptr;	/* pointer for call insn into lookup_elem */
-	};
+	enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
 };
 
 #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
@@ -953,7 +950,7 @@ error:
 	return -EINVAL;
 }
 
-static int check_call(struct verifier_env *env, int func_id, int insn_idx)
+static int check_call(struct verifier_env *env, int func_id)
 {
 	struct verifier_state *state = &env->cur_state;
 	const struct bpf_func_proto *fn = NULL;
@@ -989,13 +986,6 @@ static int check_call(struct verifier_env *env, int func_id, int insn_idx)
 	err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &map);
 	if (err)
 		return err;
-	if (func_id == BPF_FUNC_tail_call) {
-		if (map == NULL) {
-			verbose("verifier bug\n");
-			return -EINVAL;
-		}
-		env->insn_aux_data[insn_idx].map_ptr = map;
-	}
 	err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &map);
 	if (err)
 		return err;
@@ -1921,7 +1911,7 @@ static int do_check(struct verifier_env *env)
 				return -EINVAL;
 			}
 
-			err = check_call(env, insn->imm, insn_idx);
+			err = check_call(env, insn->imm);
 			if (err)
 				return err;
 
@@ -2212,10 +2202,7 @@ static int fixup_bpf_calls(struct verifier_env *env)
 	struct bpf_insn *insn = prog->insnsi;
 	const struct bpf_func_proto *fn;
 	const int insn_cnt = prog->len;
-	struct bpf_insn insn_buf[16];
-	struct bpf_prog *new_prog;
-	struct bpf_map *map_ptr;
-	int i, cnt, delta = 0;
+	int i;
 
 	for (i = 0; i < insn_cnt; i++, insn++) {
 		if (insn->code != (BPF_JMP | BPF_CALL))
@@ -2233,31 +2220,6 @@ static int fixup_bpf_calls(struct verifier_env *env)
 			 */
 			insn->imm = 0;
 			insn->code |= BPF_X;
-
-			/* instead of changing every JIT dealing with tail_call
-			 * emit two extra insns:
-			 * if (index >= max_entries) goto out;
-			 * index &= array->index_mask;
-			 * to avoid out-of-bounds cpu speculation
-			 */
-			map_ptr = env->insn_aux_data[i + delta].map_ptr;
-			if (!map_ptr->unpriv_array)
-				continue;
-			insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
-						  map_ptr->max_entries, 2);
-			insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
-						    container_of(map_ptr,
-								 struct bpf_array,
-								 map)->index_mask);
-			insn_buf[2] = *insn;
-			cnt = 3;
-			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
-			if (!new_prog)
-				return -ENOMEM;
-
-			delta += cnt - 1;
-			env->prog = prog = new_prog;
-			insn = new_prog->insnsi + i + delta;
 			continue;
 		}
