author     Greg Kroah-Hartman <gregkh@google.com>  2018-02-03 17:44:38 +0100
committer  Greg Kroah-Hartman <gregkh@google.com>  2018-02-03 17:44:38 +0100
commit     aa856bd83c43969dc3e8f8c0e97e6105e91aef76 (patch)
tree       8ce243d6493c2cb4abd9a8ca14ae80c919d6bd8f /kernel
parent     4e74e983ab6ee28bb272bbcd317bd2d6d60e4be3 (diff)
parent     f0feeec9c246f6518e168daec66d92a4a6bf0965 (diff)
Merge 4.4.115 into android-4.4
Changes in 4.4.115

loop: fix concurrent lo_open/lo_release
bpf: fix branch pruning logic
x86: bpf_jit: small optimization in emit_bpf_tail_call()
bpf: fix bpf_tail_call() x64 JIT
bpf: introduce BPF_JIT_ALWAYS_ON config
bpf: arsh is not supported in 32 bit alu thus reject it
bpf: avoid false sharing of map refcount with max_entries
bpf: fix divides by zero
bpf: fix 32-bit divide by zero
bpf: reject stores into ctx via st and xadd
x86/pti: Make unpoison of pgd for trusted boot work for real
kaiser: fix intel_bts perf crashes
ALSA: seq: Make ioctls race-free
crypto: aesni - handle zero length dst buffer
crypto: af_alg - whitelist mask and type
power: reset: zx-reboot: add missing MODULE_DESCRIPTION/AUTHOR/LICENSE
gpio: iop: add missing MODULE_DESCRIPTION/AUTHOR/LICENSE
gpio: ath79: add missing MODULE_DESCRIPTION/LICENSE
mtd: nand: denali_pci: add missing MODULE_DESCRIPTION/AUTHOR/LICENSE
igb: Free IRQs when device is hotplugged
KVM: x86: emulator: Return to user-mode on L1 CPL=0 emulation failure
KVM: x86: Don't re-execute instruction when not passing CR2 value
KVM: X86: Fix operand/address-size during instruction decoding
KVM: x86: ioapic: Fix level-triggered EOI and IOAPIC reconfigure race
KVM: x86: ioapic: Clear Remote IRR when entry is switched to edge-triggered
KVM: x86: ioapic: Preserve read-only values in the redirection table
ACPI / bus: Leave modalias empty for devices which are not present
cpufreq: Add Loongson machine dependencies
bcache: check return value of register_shrinker
drm/amdgpu: Fix SDMA load/unload sequence on HWS disabled mode
drm/amdkfd: Fix SDMA ring buffer size calculation
drm/amdkfd: Fix SDMA oversubsription handling
openvswitch: fix the incorrect flow action alloc size
mac80211: fix the update of path metric for RANN frame
btrfs: fix deadlock when writing out space cache
KVM: VMX: Fix rflags cache during vCPU reset
xen-netfront: remove warning when unloading module
nfsd: CLOSE SHOULD return the invalid special stateid for NFSv4.x (x>0)
nfsd: Ensure we check stateid validity in the seqid operation checks
grace: replace BUG_ON by WARN_ONCE in exit_net hook
nfsd: check for use of the closed special stateid
lockd: fix "list_add double add" caused by legacy signal interface
hwmon: (pmbus) Use 64bit math for DIRECT format values
net: ethernet: xilinx: Mark XILINX_LL_TEMAC broken on 64-bit
quota: Check for register_shrinker() failure.
SUNRPC: Allow connect to return EHOSTUNREACH
kmemleak: add scheduling point to kmemleak_scan()
drm/omap: Fix error handling path in 'omap_dmm_probe()'
xfs: ubsan fixes
scsi: aacraid: Prevent crash in case of free interrupt during scsi EH path
scsi: ufs: ufshcd: fix potential NULL pointer dereference in ufshcd_config_vreg
media: usbtv: add a new usbid
usb: gadget: don't dereference g until after it has been null checked
staging: rtl8188eu: Fix incorrect response to SIOCGIWESSID
usb: option: Add support for FS040U modem
USB: serial: pl2303: new device id for Chilitag
USB: cdc-acm: Do not log urb submission errors on disconnect
CDC-ACM: apply quirk for card reader
USB: serial: io_edgeport: fix possible sleep-in-atomic
usbip: prevent bind loops on devices attached to vhci_hcd
usbip: list: don't list devices attached to vhci_hcd
USB: serial: simple: add Motorola Tetra driver
usb: f_fs: Prevent gadget unbind if it is already unbound
usb: uas: unconditionally bring back host after reset
selinux: general protection fault in sock_has_perm
serial: imx: Only wakeup via RTSDEN bit if the system has RTS/CTS
spi: imx: do not access registers while clocks disabled
Linux 4.4.115

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/bpf/core.c      30
-rw-r--r--  kernel/bpf/verifier.c  70
2 files changed, 96 insertions, 4 deletions
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 3fd76cf0c21e..eb52d11fdaa7 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -256,6 +256,7 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
}
EXPORT_SYMBOL_GPL(__bpf_call_base);
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
/**
* __bpf_prog_run - run eBPF program on a given context
* @ctx: is the data we are operating on
@@ -443,7 +444,7 @@ select_insn:
DST = tmp;
CONT;
ALU_MOD_X:
- if (unlikely(SRC == 0))
+ if (unlikely((u32)SRC == 0))
return 0;
tmp = (u32) DST;
DST = do_div(tmp, (u32) SRC);
@@ -462,7 +463,7 @@ select_insn:
DST = div64_u64(DST, SRC);
CONT;
ALU_DIV_X:
- if (unlikely(SRC == 0))
+ if (unlikely((u32)SRC == 0))
return 0;
tmp = (u32) DST;
do_div(tmp, (u32) SRC);
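
[Illustration, not part of the patch: the two hunks above hinge on the divisor being only the low 32 bits of SRC, so a value that is non-zero as a u64 can still have a zero low half. A small user-space sketch of the patched check; the helper name is invented here.]

#include <stdint.h>
#include <stdio.h>

/* Sketch of the interpreter's 32-bit divide path after the fix: the
 * divisor is the low 32 bits of SRC, so the zero test must look at
 * (u32)SRC rather than the full 64-bit SRC. */
static uint64_t alu32_div(uint64_t dst, uint64_t src)
{
	if ((uint32_t)src == 0)
		return 0;	/* stand-in for the interpreter's "return 0" */
	return (uint32_t)dst / (uint32_t)src;
}

int main(void)
{
	/* Non-zero as a u64, but the low 32 bits (the real divisor) are zero. */
	uint64_t src = 0x100000000ULL;

	printf("%llu\n", (unsigned long long)alu32_div(10, src));	/* prints 0 */
	return 0;
}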
@@ -517,7 +518,7 @@ select_insn:
struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
struct bpf_array *array = container_of(map, struct bpf_array, map);
struct bpf_prog *prog;
- u64 index = BPF_R3;
+ u32 index = BPF_R3;
if (unlikely(index >= array->map.max_entries))
goto out;
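
[Illustration, not part of the patch: narrowing the tail-call index from u64 to u32 means only the low 32 bits of R3 are compared against max_entries, matching the 32-bit handling in the JITs mentioned in the changelog. The values below are invented for the example.]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t bpf_r3 = 0x100000002ULL;	/* upper bits set, low 32 bits = 2 */
	uint32_t index = (uint32_t)bpf_r3;	/* as in the patched interpreter */
	uint32_t max_entries = 4;

	/* The bounds check now sees 2, not 0x100000002. */
	printf("index=%u, in bounds: %s\n", index,
	       index < max_entries ? "yes" : "no");
	return 0;
}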
@@ -725,6 +726,13 @@ load_byte:
return 0;
}
+#else
+static unsigned int __bpf_prog_ret0(void *ctx, const struct bpf_insn *insn)
+{
+ return 0;
+}
+#endif
+
bool bpf_prog_array_compatible(struct bpf_array *array,
const struct bpf_prog *fp)
{
@@ -771,9 +779,23 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
*/
int bpf_prog_select_runtime(struct bpf_prog *fp)
{
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
fp->bpf_func = (void *) __bpf_prog_run;
-
+#else
+ fp->bpf_func = (void *) __bpf_prog_ret0;
+#endif
+
+ /* eBPF JITs can rewrite the program in case constant
+ * blinding is active. However, in case of error during
+ * blinding, bpf_int_jit_compile() must always return a
+ * valid program, which in this case would simply not
+ * be JITed, but falls back to the interpreter.
+ */
bpf_int_jit_compile(fp);
+#ifdef CONFIG_BPF_JIT_ALWAYS_ON
+ if (!fp->jited)
+ return -ENOTSUPP;
+#endif
bpf_prog_lock_ro(fp);
/* The tail call compatibility check can only be done at
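
[Illustration, not kernel code: a toy user-space model of the control flow added in the hunk above; every name below is invented. With the interpreter compiled out (CONFIG_BPF_JIT_ALWAYS_ON), a program the JIT cannot handle is refused with -ENOTSUPP instead of silently falling back to a stub.]

#include <stdio.h>
#include <stdbool.h>

#define ENOTSUPP 524		/* kernel-internal errno value used by the patch */
#define JIT_ALWAYS_ON 1		/* models CONFIG_BPF_JIT_ALWAYS_ON=y */

struct toy_prog {
	bool jited;
	int (*run)(void);
};

static int run_interpreter(void) { return 1; }	/* stands in for __bpf_prog_run */
static int run_ret0(void)        { return 0; }	/* stands in for __bpf_prog_ret0 */

static void toy_jit_compile(struct toy_prog *p)
{
	p->jited = false;	/* pretend the JIT failed or is unavailable */
}

static int toy_select_runtime(struct toy_prog *p)
{
	/* mirrors the #ifndef CONFIG_BPF_JIT_ALWAYS_ON / #else in the patch */
	p->run = JIT_ALWAYS_ON ? run_ret0 : run_interpreter;

	toy_jit_compile(p);
	if (JIT_ALWAYS_ON && !p->jited)
		return -ENOTSUPP;	/* no interpreter to fall back to */
	return 0;
}

int main(void)
{
	struct toy_prog p = { .jited = false, .run = NULL };

	printf("select_runtime: %d\n", toy_select_runtime(&p));	/* prints -524 */
	return 0;
}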
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 014c2d759916..c14003840bc5 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -191,6 +191,7 @@ struct bpf_insn_aux_data {
enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */
};
+ bool seen; /* this insn was processed by the verifier */
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
@@ -682,6 +683,13 @@ static bool is_pointer_value(struct verifier_env *env, int regno)
}
}
+static bool is_ctx_reg(struct verifier_env *env, int regno)
+{
+ const struct reg_state *reg = &env->cur_state.regs[regno];
+
+ return reg->type == PTR_TO_CTX;
+}
+
/* check whether memory at (regno + off) is accessible for t = (read | write)
* if t==write, value_regno is a register which value is stored into memory
* if t==read, value_regno is a register which will receive the value from memory
@@ -778,6 +786,12 @@ static int check_xadd(struct verifier_env *env, struct bpf_insn *insn)
return -EACCES;
}
+ if (is_ctx_reg(env, insn->dst_reg)) {
+ verbose("BPF_XADD stores into R%d context is not allowed\n",
+ insn->dst_reg);
+ return -EACCES;
+ }
+
/* check whether atomic_add can read the memory */
err = check_mem_access(env, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_READ, -1);
@@ -1164,6 +1178,11 @@ static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn)
return -EINVAL;
}
+ if (opcode == BPF_ARSH && BPF_CLASS(insn->code) != BPF_ALU64) {
+ verbose("BPF_ARSH not supported for 32 bit ALU\n");
+ return -EINVAL;
+ }
+
if ((opcode == BPF_LSH || opcode == BPF_RSH ||
opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
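
[Illustration, not part of the patch: the check added above refuses BPF_ARSH whenever the instruction class is not BPF_ALU64, since eBPF defines no 32-bit arithmetic right shift. The opcode constants are re-declared with their uapi values so the snippet stands alone.]

#include <stdint.h>
#include <stdio.h>

/* Opcode helpers and values as in uapi/linux/bpf_common.h and bpf.h. */
#define BPF_CLASS(code)	((code) & 0x07)
#define BPF_OP(code)	((code) & 0xf0)
#define BPF_ALU		0x04
#define BPF_ALU64	0x07
#define BPF_K		0x00
#define BPF_ARSH	0xc0

int main(void)
{
	/* A 32-bit arithmetic right shift: BPF_ALU (not BPF_ALU64) class. */
	uint8_t code = BPF_ALU | BPF_ARSH | BPF_K;

	if (BPF_OP(code) == BPF_ARSH && BPF_CLASS(code) != BPF_ALU64)
		printf("BPF_ARSH not supported for 32 bit ALU\n");
	return 0;
}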
@@ -1793,6 +1812,7 @@ static int do_check(struct verifier_env *env)
print_bpf_insn(env, insn);
}
+ env->insn_aux_data[insn_idx].seen = true;
if (class == BPF_ALU || class == BPF_ALU64) {
err = check_alu_op(env, insn);
if (err)
@@ -1902,6 +1922,12 @@ static int do_check(struct verifier_env *env)
if (err)
return err;
+ if (is_ctx_reg(env, insn->dst_reg)) {
+ verbose("BPF_ST stores into R%d context is not allowed\n",
+ insn->dst_reg);
+ return -EACCES;
+ }
+
/* check that memory (dst_reg + off) is writeable */
err = check_mem_access(env, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_WRITE,
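
[Illustration, not verifier code: both new rejections (BPF_XADD above and BPF_ST here) key off the destination register's type being PTR_TO_CTX, which is what R1 holds on program entry. A toy model with invented types:]

#include <stdio.h>
#include <stdbool.h>

/* Toy model of the new checks: BPF_ST and BPF_XADD must not write
 * through a register whose verifier type is PTR_TO_CTX. */
enum reg_type { NOT_INIT, PTR_TO_CTX, PTR_TO_STACK };

static bool is_ctx_reg(const enum reg_type *regs, int regno)
{
	return regs[regno] == PTR_TO_CTX;
}

int main(void)
{
	enum reg_type regs[11] = { NOT_INIT };
	int dst_reg = 1;		/* e.g. *(u32 *)(r1 + 0) = 42 via BPF_ST */

	regs[1] = PTR_TO_CTX;		/* R1 carries the context pointer on entry */

	if (is_ctx_reg(regs, dst_reg))
		printf("BPF_ST stores into R%d context is not allowed\n", dst_reg);
	return 0;
}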
@@ -1988,6 +2014,7 @@ process_bpf_exit:
return err;
insn_idx++;
+ env->insn_aux_data[insn_idx].seen = true;
} else {
verbose("invalid BPF_LD mode\n");
return -EINVAL;
@@ -2125,6 +2152,7 @@ static int adjust_insn_aux_data(struct verifier_env *env, u32 prog_len,
u32 off, u32 cnt)
{
struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
+ int i;
if (cnt == 1)
return 0;
@@ -2134,6 +2162,8 @@ static int adjust_insn_aux_data(struct verifier_env *env, u32 prog_len,
memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
memcpy(new_data + off + cnt - 1, old_data + off,
sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
+ for (i = off; i < off + cnt - 1; i++)
+ new_data[i].seen = true;
env->insn_aux_data = new_data;
vfree(old_data);
return 0;
@@ -2152,6 +2182,25 @@ static struct bpf_prog *bpf_patch_insn_data(struct verifier_env *env, u32 off,
return new_prog;
}
+/* The verifier does more data flow analysis than llvm and will not explore
+ * branches that are dead at run time. Malicious programs can have dead code
+ * too. Therefore replace all dead at-run-time code with nops.
+ */
+static void sanitize_dead_code(struct verifier_env *env)
+{
+ struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
+ struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0);
+ struct bpf_insn *insn = env->prog->insnsi;
+ const int insn_cnt = env->prog->len;
+ int i;
+
+ for (i = 0; i < insn_cnt; i++) {
+ if (aux_data[i].seen)
+ continue;
+ memcpy(insn + i, &nop, sizeof(nop));
+ }
+}
+
/* convert load instructions that access fields of 'struct __sk_buff'
* into sequence of instructions that access fields of 'struct sk_buff'
*/
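
[Illustration, not kernel code: a user-space toy of the sanitize_dead_code() pass added above, with invented structures. Instructions the verifier never marked as seen are dead at run time and get overwritten with a mov r0,r0 NOP, so dead code behind a never-taken branch cannot reach the JIT or interpreter.]

#include <stdio.h>
#include <stdbool.h>
#include <string.h>

/* Toy model: anything the verifier did not visit is replaced with a NOP
 * (BPF_MOV64_REG(BPF_REG_0, BPF_REG_0) in the real patch). */
struct toy_insn { const char *text; };

int main(void)
{
	struct toy_insn prog[] = {
		{ "r0 = 1" },
		{ "if r0 != 1 goto +1" },	/* never taken: r0 is known to be 1 */
		{ "exit" },
		{ "r0 = *(u64 *)(r9 - 8)" },	/* dead: only reachable if the branch were taken */
	};
	bool seen[] = { true, true, true, false };	/* as recorded in insn_aux_data */
	const struct toy_insn nop = { "r0 = r0" };

	for (size_t i = 0; i < sizeof(prog) / sizeof(prog[0]); i++) {
		if (!seen[i])
			memcpy(&prog[i], &nop, sizeof(nop));
		printf("%zu: %s\n", i, prog[i].text);
	}
	return 0;
}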
@@ -2218,6 +2267,24 @@ static int fixup_bpf_calls(struct verifier_env *env)
int i, cnt, delta = 0;
for (i = 0; i < insn_cnt; i++, insn++) {
+ if (insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
+ insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
+ /* due to JIT bugs clear upper 32-bits of src register
+ * before div/mod operation
+ */
+ insn_buf[0] = BPF_MOV32_REG(insn->src_reg, insn->src_reg);
+ insn_buf[1] = *insn;
+ cnt = 2;
+ new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
+ if (!new_prog)
+ return -ENOMEM;
+
+ delta += cnt - 1;
+ env->prog = prog = new_prog;
+ insn = new_prog->insnsi + i + delta;
+ continue;
+ }
+
if (insn->code != (BPF_JMP | BPF_CALL))
continue;
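
[Illustration, not part of the patch: the rewrite above prepends BPF_MOV32_REG(src, src), and 32-bit ALU moves in eBPF zero-extend into the full 64-bit register, so the divisor's upper half is cleared before the 32-bit divide or modulo that buggy JITs mis-handle. The same effect in plain C:]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t src = 0xdeadbeef00000007ULL;	/* stale upper 32 bits */

	/* Effect of the inserted BPF_MOV32_REG(src, src): a 32-bit move
	 * zero-extends, leaving only the low 32 bits of the divisor. */
	src = (uint32_t)src;

	printf("divisor after mov32: %llu\n", (unsigned long long)src);
	printf("10 %% divisor = %llu\n", (unsigned long long)(10 % src));
	return 0;
}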
@@ -2371,6 +2438,9 @@ skip_full_check:
free_states(env);
if (ret == 0)
+ sanitize_dead_code(env);
+
+ if (ret == 0)
/* program is valid, convert *(u32*)(ctx + off) accesses */
ret = convert_ctx_accesses(env);