Diffstat (limited to 'include/linux/bpf.h')
 include/linux/bpf.h | 216 ++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 208 insertions(+), 8 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index bae3da5bcda0..77476208f1b0 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -10,14 +10,18 @@
#include <uapi/linux/bpf.h>
#include <linux/workqueue.h>
#include <linux/file.h>
+#include <linux/percpu.h>
+#include <linux/err.h>
+struct perf_event;
struct bpf_map;
/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
/* funcs callable from userspace (via syscall) */
struct bpf_map *(*map_alloc)(union bpf_attr *attr);
- void (*map_free)(struct bpf_map *);
+ void (*map_release)(struct bpf_map *map, struct file *map_file);
+ void (*map_free)(struct bpf_map *map);
int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
/* funcs callable from userspace and from eBPF programs */
@@ -26,8 +30,9 @@ struct bpf_map_ops {
int (*map_delete_elem)(struct bpf_map *map, void *key);
/* funcs called by prog_array and perf_event_array map */
- void *(*map_fd_get_ptr) (struct bpf_map *map, int fd);
- void (*map_fd_put_ptr) (void *ptr);
+ void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
+ int fd);
+ void (*map_fd_put_ptr)(void *ptr);
};
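
For the fd-based maps named in the comment above (prog_array, perf_event_array), ->map_fd_get_ptr() runs at update time: the value written from user space is a file descriptor that the map converts into an internal pointer, and ->map_fd_put_ptr() later drops that reference. A minimal user-space sketch of the update side, assuming hypothetical prog_array_fd/prog_fd variables and the raw bpf(2) syscall:

	#include <linux/bpf.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* Update slot 'key' of a BPF_MAP_TYPE_PROG_ARRAY with a program fd;
	 * kernel-side, ->map_fd_get_ptr() turns prog_fd into a
	 * struct bpf_prog pointer before it is stored in the array.
	 */
	static int prog_array_update(int prog_array_fd, __u32 key, __u32 prog_fd)
	{
		union bpf_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.map_fd = prog_array_fd;
		attr.key    = (__u64)(unsigned long)&key;
		attr.value  = (__u64)(unsigned long)&prog_fd;

		return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
	}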
struct bpf_map {
@@ -39,6 +44,7 @@ struct bpf_map {
u32 key_size;
u32 value_size;
u32 max_entries;
+ u32 map_flags;
u32 pages;
bool unpriv_array;
/* 7 bytes hole */
@@ -50,6 +56,9 @@ struct bpf_map {
atomic_t refcnt;
atomic_t usercnt;
struct work_struct work;
+#ifdef CONFIG_SECURITY
+ void *security;
+#endif
};
struct bpf_map_type_list {
@@ -73,7 +82,13 @@ enum bpf_arg_type {
* functions that access data on eBPF program stack
*/
ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */
+	ARG_PTR_TO_RAW_STACK, /* any pointer to eBPF program stack; the area
+				* does not need to be initialized, but the
+				* helper function must fill all bytes or clear
+				* them in the error case.
+				*/
+
ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */
+ ARG_CONST_STACK_SIZE_OR_ZERO, /* number of bytes accessed from stack or 0 */
ARG_PTR_TO_CTX, /* pointer to context */
ARG_ANYTHING, /* any (initialized) argument is ok */
@@ -93,6 +108,7 @@ enum bpf_return_type {
struct bpf_func_proto {
u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
bool gpl_only;
+ bool pkt_access;
enum bpf_return_type ret_type;
enum bpf_arg_type arg1_type;
enum bpf_arg_type arg2_type;
@@ -112,6 +128,38 @@ enum bpf_access_type {
BPF_WRITE = 2
};
+/* types of values stored in eBPF registers */
+enum bpf_reg_type {
+ NOT_INIT = 0, /* nothing was written into register */
+ UNKNOWN_VALUE, /* reg doesn't contain a valid pointer */
+ PTR_TO_CTX, /* reg points to bpf_context */
+ CONST_PTR_TO_MAP, /* reg points to struct bpf_map */
+ PTR_TO_MAP_VALUE, /* reg points to map element value */
+ PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
+ FRAME_PTR, /* reg == frame_pointer */
+ PTR_TO_STACK, /* reg == frame_pointer + imm */
+ CONST_IMM, /* constant integer value */
+
+ /* PTR_TO_PACKET represents:
+ * skb->data
+ * skb->data + imm
+ * skb->data + (u16) var
+ * skb->data + (u16) var + imm
+ * if (range > 0) then [ptr, ptr + range - off) is safe to access
+ * if (id > 0) means that some 'var' was added
+ * if (off > 0) means that 'imm' was added
+ */
+ PTR_TO_PACKET,
+ PTR_TO_PACKET_END, /* skb->data + headlen */
+
+ /* PTR_TO_MAP_VALUE_ADJ is used for doing pointer math inside of a map
+ * elem value. We only allow this if we can statically verify that
+ * accesses from this register are going to fall within the size of the
+ * map element.
+ */
+ PTR_TO_MAP_VALUE_ADJ,
+};
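
As an illustration of the PTR_TO_PACKET/PTR_TO_PACKET_END rules above, here is a minimal TC classifier sketch in restricted C; SEC(), the TC_ACT_* codes and the bpf_helpers.h header come from the usual clang/libbpf sample toolchain, not from this file:

	#include <linux/bpf.h>
	#include <linux/if_ether.h>
	#include <linux/pkt_cls.h>
	#include <asm/byteorder.h>	/* __constant_htons() */
	#include "bpf_helpers.h"	/* SEC(); toolchain-side helper header */

	SEC("classifier")
	int cls_example(struct __sk_buff *skb)
	{
		void *data     = (void *)(long)skb->data;	/* PTR_TO_PACKET */
		void *data_end = (void *)(long)skb->data_end;	/* PTR_TO_PACKET_END */
		struct ethhdr *eth = data;

		/* The explicit bounds test gives the packet pointer a
		 * non-zero 'range'; without it the verifier rejects the
		 * eth->h_proto load below.
		 */
		if (data + sizeof(*eth) > data_end)
			return TC_ACT_OK;

		return eth->h_proto == __constant_htons(ETH_P_IP) ?
		       TC_ACT_OK : TC_ACT_SHOT;
	}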
+
struct bpf_prog;
struct bpf_verifier_ops {
@@ -121,8 +169,10 @@ struct bpf_verifier_ops {
/* return true if 'size' wide access at offset 'off' within bpf_context
* with 'type' (read or write) is allowed
*/
- bool (*is_valid_access)(int off, int size, enum bpf_access_type type);
-
+ bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
+ enum bpf_reg_type *reg_type);
+ int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
+ const struct bpf_prog *prog);
u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg,
int src_reg, int ctx_off,
struct bpf_insn *insn, struct bpf_prog *prog);
@@ -137,10 +187,14 @@ struct bpf_prog_type_list {
struct bpf_prog_aux {
atomic_t refcnt;
u32 used_map_cnt;
+ u32 max_ctx_offset;
const struct bpf_verifier_ops *ops;
struct bpf_map **used_maps;
struct bpf_prog *prog;
struct user_struct *user;
+#ifdef CONFIG_SECURITY
+ void *security;
+#endif
union {
struct work_struct work;
struct rcu_head rcu;
@@ -161,39 +215,154 @@ struct bpf_array {
union {
char value[0] __aligned(8);
void *ptrs[0] __aligned(8);
+ void __percpu *pptrs[0] __aligned(8);
};
};
+
#define MAX_TAIL_CALL_CNT 32
+struct bpf_event_entry {
+ struct perf_event *event;
+ struct file *perf_file;
+ struct file *map_file;
+ struct rcu_head rcu;
+};
+
u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
-void bpf_fd_array_map_clear(struct bpf_map *map);
+u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+
bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
+
const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
+typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
+ unsigned long off, unsigned long len);
+
+u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
+ void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);
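
bpf_event_output() is the kernel-side backend behind flavours of the bpf_perf_event_output() helper (for the skb variant, ctx_copy appends packet bytes after the 'meta' record). A hedged program-side sketch pairing the helper with a BPF_MAP_TYPE_PERF_EVENT_ARRAY map; the map layout macro and section names follow the samples/bpf toolchain and are illustrative:

	#include <linux/bpf.h>
	#include <linux/ptrace.h>
	#include <linux/types.h>
	#include "bpf_helpers.h"	/* SEC(), bpf_map_def, helper stubs */

	struct bpf_map_def SEC("maps") events = {
		.type        = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
		.key_size    = sizeof(int),
		.value_size  = sizeof(__u32),
		.max_entries = 64,	/* e.g. one slot per possible CPU */
	};

	SEC("kprobe/sys_write")
	int trace_write(struct pt_regs *ctx)
	{
		struct { __u32 pid; } sample = {
			.pid = bpf_get_current_pid_tgid() >> 32,
		};

		/* BPF_F_CURRENT_CPU selects the event for the current CPU;
		 * kernel-side this resolves through the struct
		 * bpf_event_entry instances kept by the map.
		 */
		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
				      &sample, sizeof(sample));
		return 0;
	}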
+
+/* an array of programs to be executed under rcu_read_lock().
+ *
+ * Typical usage:
+ * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN);
+ *
+ * the structure returned by bpf_prog_array_alloc() should be populated
+ * with program pointers and the last pointer must be NULL.
+ * The user must hold a refcnt on each program and make sure the program
+ * is removed from the array before calling bpf_prog_put().
+ * The 'struct bpf_prog_array *' should only be replaced with xchg()
+ * since other cpus are walking the array of pointers in parallel.
+ */
+struct bpf_prog_array {
+ struct rcu_head rcu;
+ struct bpf_prog *progs[0];
+};
+
+struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
+void bpf_prog_array_free(struct bpf_prog_array __rcu *progs);
+
+void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs,
+ struct bpf_prog *old_prog);
+int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
+ struct bpf_prog *exclude_prog,
+ struct bpf_prog *include_prog,
+ struct bpf_prog_array **new_array);
+
+#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null) \
+ ({ \
+ struct bpf_prog **_prog, *__prog; \
+ struct bpf_prog_array *_array; \
+ u32 _ret = 1; \
+ rcu_read_lock(); \
+ _array = rcu_dereference(array); \
+ if (unlikely(check_non_null && !_array))\
+ goto _out; \
+ _prog = _array->progs; \
+ while ((__prog = READ_ONCE(*_prog))) { \
+ _ret &= func(__prog, ctx); \
+ _prog++; \
+ } \
+_out: \
+ rcu_read_unlock(); \
+ _ret; \
+ })
+
+#define BPF_PROG_RUN_ARRAY(array, ctx, func) \
+ __BPF_PROG_RUN_ARRAY(array, ctx, func, false)
+
+#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func) \
+ __BPF_PROG_RUN_ARRAY(array, ctx, func, true)
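
A hedged kernel-side sketch of the replace-with-xchg() protocol described in the comment above; 'struct my_obj' and its 'effective' field are illustrative stand-ins for a real holder such as a cgroup, and BPF_PROG_RUN comes from <linux/filter.h>:

	struct my_obj {
		struct bpf_prog_array __rcu *effective;	/* illustrative */
	};

	static int example_attach(struct my_obj *obj, struct bpf_prog *prog)
	{
		struct bpf_prog_array *new_array, *old_array;
		int err;

		/* build a new NULL-terminated array that includes 'prog' */
		err = bpf_prog_array_copy(obj->effective, NULL, prog, &new_array);
		if (err)
			return err;

		/* readers walk obj->effective under rcu_read_lock(), so
		 * the pointer may only be swapped atomically.
		 */
		old_array = xchg(&obj->effective, new_array);
		bpf_prog_array_free(old_array);
		return 0;
	}

	static u32 example_run(struct my_obj *obj, void *ctx)
	{
		/* 1 only if every program in the array returned 1 */
		return BPF_PROG_RUN_ARRAY(obj->effective, ctx, BPF_PROG_RUN);
	}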
+
#ifdef CONFIG_BPF_SYSCALL
+DECLARE_PER_CPU(int, bpf_prog_active);
+
void bpf_register_prog_type(struct bpf_prog_type_list *tl);
void bpf_register_map_type(struct bpf_map_type_list *tl);
+extern const struct file_operations bpf_map_fops;
+extern const struct file_operations bpf_prog_fops;
+
struct bpf_prog *bpf_prog_get(u32 ufd);
+struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type);
+struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i);
struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
+int __bpf_prog_charge(struct user_struct *user, u32 pages);
+void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
+int bpf_map_precharge_memlock(u32 pages);
+void *bpf_map_area_alloc(size_t size);
+void bpf_map_area_free(void *base);
extern int sysctl_unprivileged_bpf_disabled;
-int bpf_map_new_fd(struct bpf_map *map);
+int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);
int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
-int bpf_obj_get_user(const char __user *pathname);
+int bpf_obj_get_user(const char __user *pathname, int flags);
+
+int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
+int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
+int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
+ u64 flags);
+int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
+ u64 flags);
+
+int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
+
+int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
+ void *key, void *value, u64 map_flags);
+void bpf_fd_array_map_clear(struct bpf_map *map);
+
+int bpf_get_file_flag(int flags);
+
+/* memcpy that is used with 8-byte aligned pointers and a size that is
+ * a multiple of 8, forced to use 'long' read/writes to try to copy
+ * long counters atomically. Best-effort only. No barriers here, since
+ * it _will_ race with concurrent updates from BPF programs. Called
+ * from the bpf syscall and mostly used with size 8 or 16 bytes, so we
+ * ask the compiler to inline it.
+ */
+static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
+{
+ const long *lsrc = src;
+ long *ldst = dst;
+
+ size /= sizeof(long);
+ while (size--)
+ *ldst++ = *lsrc++;
+}
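
As an illustration of how the per-cpu copy helpers declared above can use bpf_long_memcpy(), a rough sketch of gathering one value per possible CPU into the flat user buffer (the real bpf_percpu_array_copy() additionally handles RCU and key lookup; the function name here is hypothetical):

	static void example_percpu_gather(void __percpu *pptr, void *value,
					  u32 aligned_size)
	{
		int cpu, off = 0;

		/* aligned_size must be a multiple of 8, e.g.
		 * round_up(map->value_size, 8), so that the whole-long
		 * copy in bpf_long_memcpy() is valid.
		 */
		for_each_possible_cpu(cpu) {
			bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu),
					aligned_size);
			off += aligned_size;
		}
	}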
/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
+
+struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
+
#else
static inline void bpf_register_prog_type(struct bpf_prog_type_list *tl)
{
@@ -204,9 +373,39 @@ static inline struct bpf_prog *bpf_prog_get(u32 ufd)
return ERR_PTR(-EOPNOTSUPP);
}
+static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
+ enum bpf_prog_type type)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
static inline void bpf_prog_put(struct bpf_prog *prog)
{
}
+static inline struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int __bpf_prog_charge(struct user_struct *user, u32 pages)
+{
+ return 0;
+}
+
+static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
+{
+}
+
+static inline int bpf_obj_get_user(const char __user *pathname, int flags)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
+ enum bpf_prog_type type)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
#endif /* CONFIG_BPF_SYSCALL */
/* verifier prototypes for helper functions called from eBPF programs */
@@ -223,6 +422,7 @@ extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_skb_vlan_push_proto;
extern const struct bpf_func_proto bpf_skb_vlan_pop_proto;
+extern const struct bpf_func_proto bpf_get_stackid_proto;
/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);