sync patches from openEuler-22.03-LTS-SP2
(cherry picked from commit 5a82a2ec311ed895542fc8dd1ccb694d9ed10cc9)
parent 6005c7aee8
commit d66e0e0eba

0001-remove-nonexistent-helper-functions-and-add-new-help.patch (new file, 742 lines added)
@@ -0,0 +1,742 @@
From 988989d6b8d1a79f1fd9afbbc2b3ae13aaf27ca4 Mon Sep 17 00:00:00 2001
From: JofDiamonds <kwb0523@163.com>
Date: Mon, 29 May 2023 19:52:53 +0800
Subject: [PATCH] remove nonexistent helper functions and add new helper
functions

---
src/bpf_helper_defs.h | 710 +-----------------------------------------
1 file changed, 9 insertions(+), 701 deletions(-)

diff --git a/src/bpf_helper_defs.h b/src/bpf_helper_defs.h
index abe612e..c8b3b33 100644
--- a/src/bpf_helper_defs.h
+++ b/src/bpf_helper_defs.h
@@ -3659,715 +3659,23 @@ static void *(*bpf_this_cpu_ptr)(const void *percpu_ptr) = (void *) 154;
static long (*bpf_redirect_peer)(__u32 ifindex, __u64 flags) = (void *) 155;

/*
- * bpf_task_storage_get
+ * bpf_get_sockops_uid_gid
*
- * Get a bpf_local_storage from the *task*.
- *
- * Logically, it could be thought of as getting the value from
- * a *map* with *task* as the **key**. From this
- * perspective, the usage is not much different from
- * **bpf_map_lookup_elem**\ (*map*, **&**\ *task*) except this
- * helper enforces the key must be an task_struct and the map must also
- * be a **BPF_MAP_TYPE_TASK_STORAGE**.
- *
- * Underneath, the value is stored locally at *task* instead of
- * the *map*. The *map* is used as the bpf-local-storage
- * "type". The bpf-local-storage "type" (i.e. the *map*) is
- * searched against all bpf_local_storage residing at *task*.
- *
- * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be
- * used such that a new bpf_local_storage will be
- * created if one does not exist. *value* can be used
- * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify
- * the initial value of a bpf_local_storage. If *value* is
- * **NULL**, the new bpf_local_storage will be zero initialized.
- *
- * Returns
- * A bpf_local_storage pointer is returned on success.
- *
- * **NULL** if not found or there was an error in adding
- * a new bpf_local_storage.
- */
-static void *(*bpf_task_storage_get)(void *map, struct task_struct *task, void *value, __u64 flags) = (void *) 156;
-
-/*
- * bpf_task_storage_delete
- *
- * Delete a bpf_local_storage from a *task*.
- *
- * Returns
- * 0 on success.
- *
- * **-ENOENT** if the bpf_local_storage cannot be found.
- */
-static long (*bpf_task_storage_delete)(void *map, struct task_struct *task) = (void *) 157;
-
-/*
- * bpf_get_current_task_btf
- *
- * Return a BTF pointer to the "current" task.
- * This pointer can also be used in helpers that accept an
- * *ARG_PTR_TO_BTF_ID* of type *task_struct*.
- *
- * Returns
- * Pointer to the current task.
- */
-static struct task_struct *(*bpf_get_current_task_btf)(void) = (void *) 158;
-
-/*
- * bpf_bprm_opts_set
- *
- * Set or clear certain options on *bprm*:
- *
- * **BPF_F_BPRM_SECUREEXEC** Set the secureexec bit
- * which sets the **AT_SECURE** auxv for glibc. The bit
- * is cleared if the flag is not specified.
- *
- * Returns
- * **-EINVAL** if invalid *flags* are passed, zero otherwise.
- */
-static long (*bpf_bprm_opts_set)(struct linux_binprm *bprm, __u64 flags) = (void *) 159;
-
-/*
- * bpf_ktime_get_coarse_ns
- *
- * Return a coarse-grained version of the time elapsed since
- * system boot, in nanoseconds. Does not include time the system
- * was suspended.
- *
- * See: **clock_gettime**\ (**CLOCK_MONOTONIC_COARSE**)
- *
- * Returns
- * Current *ktime*.
- */
-static __u64 (*bpf_ktime_get_coarse_ns)(void) = (void *) 160;
-
-/*
- * bpf_ima_inode_hash
- *
- * Returns the stored IMA hash of the *inode* (if it's avaialable).
- * If the hash is larger than *size*, then only *size*
- * bytes will be copied to *dst*
- *
- * Returns
- * The **hash_algo** is returned on success,
- * **-EOPNOTSUP** if IMA is disabled or **-EINVAL** if
- * invalid arguments are passed.
- */
-static long (*bpf_ima_inode_hash)(struct inode *inode, void *dst, __u32 size) = (void *) 161;
-
-/*
- * bpf_sock_from_file
- *
- * If the given file represents a socket, returns the associated
- * socket.
+ * Get sock's uid and gid
*
* Returns
- * A pointer to a struct socket on success or NULL if the file is
- * not a socket.
+ * A 64-bit integer containing the current GID and UID, and
+ * created as such: *current_gid* **<< 32 \|** *current_uid*.
*/
-static struct socket *(*bpf_sock_from_file)(struct file *file) = (void *) 162;
+static __u64 (*bpf_get_sockops_uid_gid)(void *sockops) = (void *) 156;

/*
- * bpf_check_mtu
- *
- * Check packet size against exceeding MTU of net device (based
- * on *ifindex*). This helper will likely be used in combination
- * with helpers that adjust/change the packet size.
- *
- * The argument *len_diff* can be used for querying with a planned
- * size change. This allows to check MTU prior to changing packet
- * ctx. Providing an *len_diff* adjustment that is larger than the
- * actual packet size (resulting in negative packet size) will in
- * principle not exceed the MTU, why it is not considered a
- * failure. Other BPF-helpers are needed for performing the
- * planned size change, why the responsability for catch a negative
- * packet size belong in those helpers.
- *
- * Specifying *ifindex* zero means the MTU check is performed
- * against the current net device. This is practical if this isn't
- * used prior to redirect.
- *
- * On input *mtu_len* must be a valid pointer, else verifier will
- * reject BPF program. If the value *mtu_len* is initialized to
- * zero then the ctx packet size is use. When value *mtu_len* is
- * provided as input this specify the L3 length that the MTU check
- * is done against. Remember XDP and TC length operate at L2, but
- * this value is L3 as this correlate to MTU and IP-header tot_len
- * values which are L3 (similar behavior as bpf_fib_lookup).
- *
- * The Linux kernel route table can configure MTUs on a more
- * specific per route level, which is not provided by this helper.
- * For route level MTU checks use the **bpf_fib_lookup**\ ()
- * helper.
- *
- * *ctx* is either **struct xdp_md** for XDP programs or
- * **struct sk_buff** for tc cls_act programs.
- *
- * The *flags* argument can be a combination of one or more of the
- * following values:
- *
- * **BPF_MTU_CHK_SEGS**
- * This flag will only works for *ctx* **struct sk_buff**.
- * If packet context contains extra packet segment buffers
- * (often knows as GSO skb), then MTU check is harder to
- * check at this point, because in transmit path it is
- * possible for the skb packet to get re-segmented
- * (depending on net device features). This could still be
- * a MTU violation, so this flag enables performing MTU
- * check against segments, with a different violation
- * return code to tell it apart. Check cannot use len_diff.
- *
- * On return *mtu_len* pointer contains the MTU value of the net
- * device. Remember the net device configured MTU is the L3 size,
- * which is returned here and XDP and TC length operate at L2.
- * Helper take this into account for you, but remember when using
- * MTU value in your BPF-code.
+ * bpf_sk_original_addr
*
+ * Get Ipv4 origdst or replysrc. Works with IPv4.
*
* Returns
- * * 0 on success, and populate MTU value in *mtu_len* pointer.
- *
- * * < 0 if any input argument is invalid (*mtu_len* not updated)
- *
- * MTU violations return positive values, but also populate MTU
- * value in *mtu_len* pointer, as this can be needed for
- * implementing PMTU handing:
- *
- * * **BPF_MTU_CHK_RET_FRAG_NEEDED**
- * * **BPF_MTU_CHK_RET_SEGS_TOOBIG**
+ * 0 on success, or a negative error in case of failure.
*/
-static long (*bpf_check_mtu)(void *ctx, __u32 ifindex, __u32 *mtu_len, __s32 len_diff, __u64 flags) = (void *) 163;
-
-/*
- * bpf_for_each_map_elem
- *
- * For each element in **map**, call **callback_fn** function with
- * **map**, **callback_ctx** and other map-specific parameters.
- * The **callback_fn** should be a static function and
- * the **callback_ctx** should be a pointer to the stack.
- * The **flags** is used to control certain aspects of the helper.
- * Currently, the **flags** must be 0.
- *
- * The following are a list of supported map types and their
- * respective expected callback signatures:
- *
- * BPF_MAP_TYPE_HASH, BPF_MAP_TYPE_PERCPU_HASH,
- * BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH,
- * BPF_MAP_TYPE_ARRAY, BPF_MAP_TYPE_PERCPU_ARRAY
- *
- * long (\*callback_fn)(struct bpf_map \*map, const void \*key, void \*value, void \*ctx);
- *
- * For per_cpu maps, the map_value is the value on the cpu where the
- * bpf_prog is running.
- *
- * If **callback_fn** return 0, the helper will continue to the next
- * element. If return value is 1, the helper will skip the rest of
- * elements and return. Other return values are not used now.
- *
- *
- * Returns
- * The number of traversed map elements for success, **-EINVAL** for
- * invalid **flags**.
- */
-static long (*bpf_for_each_map_elem)(void *map, void *callback_fn, void *callback_ctx, __u64 flags) = (void *) 164;
-
-/*
- * bpf_snprintf
- *
- * Outputs a string into the **str** buffer of size **str_size**
- * based on a format string stored in a read-only map pointed by
- * **fmt**.
- *
- * Each format specifier in **fmt** corresponds to one u64 element
- * in the **data** array. For strings and pointers where pointees
- * are accessed, only the pointer values are stored in the *data*
- * array. The *data_len* is the size of *data* in bytes - must be
- * a multiple of 8.
- *
- * Formats **%s** and **%p{i,I}{4,6}** require to read kernel
- * memory. Reading kernel memory may fail due to either invalid
- * address or valid address but requiring a major memory fault. If
- * reading kernel memory fails, the string for **%s** will be an
- * empty string, and the ip address for **%p{i,I}{4,6}** will be 0.
- * Not returning error to bpf program is consistent with what
- * **bpf_trace_printk**\ () does for now.
- *
- *
- * Returns
- * The strictly positive length of the formatted string, including
- * the trailing zero character. If the return value is greater than
- * **str_size**, **str** contains a truncated string, guaranteed to
- * be zero-terminated except when **str_size** is 0.
- *
- * Or **-EBUSY** if the per-CPU memory copy buffer is busy.
- */
-static long (*bpf_snprintf)(char *str, __u32 str_size, const char *fmt, __u64 *data, __u32 data_len) = (void *) 165;
-
-/*
- * bpf_sys_bpf
- *
- * Execute bpf syscall with given arguments.
- *
- * Returns
- * A syscall result.
- */
-static long (*bpf_sys_bpf)(__u32 cmd, void *attr, __u32 attr_size) = (void *) 166;
-
-/*
- * bpf_btf_find_by_name_kind
- *
- * Find BTF type with given name and kind in vmlinux BTF or in module's BTFs.
- *
- * Returns
- * Returns btf_id and btf_obj_fd in lower and upper 32 bits.
- */
-static long (*bpf_btf_find_by_name_kind)(char *name, int name_sz, __u32 kind, int flags) = (void *) 167;
-
-/*
- * bpf_sys_close
- *
- * Execute close syscall for given FD.
- *
- * Returns
- * A syscall result.
- */
-static long (*bpf_sys_close)(__u32 fd) = (void *) 168;
-
-/*
- * bpf_timer_init
- *
- * Initialize the timer.
- * First 4 bits of *flags* specify clockid.
- * Only CLOCK_MONOTONIC, CLOCK_REALTIME, CLOCK_BOOTTIME are allowed.
- * All other bits of *flags* are reserved.
- * The verifier will reject the program if *timer* is not from
- * the same *map*.
- *
- * Returns
- * 0 on success.
- * **-EBUSY** if *timer* is already initialized.
- * **-EINVAL** if invalid *flags* are passed.
- * **-EPERM** if *timer* is in a map that doesn't have any user references.
- * The user space should either hold a file descriptor to a map with timers
- * or pin such map in bpffs. When map is unpinned or file descriptor is
- * closed all timers in the map will be cancelled and freed.
- */
-static long (*bpf_timer_init)(struct bpf_timer *timer, void *map, __u64 flags) = (void *) 169;
-
-/*
- * bpf_timer_set_callback
- *
- * Configure the timer to call *callback_fn* static function.
- *
- * Returns
- * 0 on success.
- * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier.
- * **-EPERM** if *timer* is in a map that doesn't have any user references.
- * The user space should either hold a file descriptor to a map with timers
- * or pin such map in bpffs. When map is unpinned or file descriptor is
- * closed all timers in the map will be cancelled and freed.
- */
-static long (*bpf_timer_set_callback)(struct bpf_timer *timer, void *callback_fn) = (void *) 170;
-
-/*
- * bpf_timer_start
- *
- * Set timer expiration N nanoseconds from the current time. The
- * configured callback will be invoked in soft irq context on some cpu
- * and will not repeat unless another bpf_timer_start() is made.
- * In such case the next invocation can migrate to a different cpu.
- * Since struct bpf_timer is a field inside map element the map
- * owns the timer. The bpf_timer_set_callback() will increment refcnt
- * of BPF program to make sure that callback_fn code stays valid.
- * When user space reference to a map reaches zero all timers
- * in a map are cancelled and corresponding program's refcnts are
- * decremented. This is done to make sure that Ctrl-C of a user
- * process doesn't leave any timers running. If map is pinned in
- * bpffs the callback_fn can re-arm itself indefinitely.
- * bpf_map_update/delete_elem() helpers and user space sys_bpf commands
- * cancel and free the timer in the given map element.
- * The map can contain timers that invoke callback_fn-s from different
- * programs. The same callback_fn can serve different timers from
- * different maps if key/value layout matches across maps.
- * Every bpf_timer_set_callback() can have different callback_fn.
- *
- *
- * Returns
- * 0 on success.
- * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier
- * or invalid *flags* are passed.
- */
-static long (*bpf_timer_start)(struct bpf_timer *timer, __u64 nsecs, __u64 flags) = (void *) 171;
-
-/*
- * bpf_timer_cancel
- *
- * Cancel the timer and wait for callback_fn to finish if it was running.
- *
- * Returns
- * 0 if the timer was not active.
- * 1 if the timer was active.
- * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier.
- * **-EDEADLK** if callback_fn tried to call bpf_timer_cancel() on its
- * own timer which would have led to a deadlock otherwise.
- */
-static long (*bpf_timer_cancel)(struct bpf_timer *timer) = (void *) 172;
-
-/*
- * bpf_get_func_ip
- *
- * Get address of the traced function (for tracing and kprobe programs).
- *
- * Returns
- * Address of the traced function.
- */
-static __u64 (*bpf_get_func_ip)(void *ctx) = (void *) 173;
-
-/*
- * bpf_get_attach_cookie
- *
- * Get bpf_cookie value provided (optionally) during the program
- * attachment. It might be different for each individual
- * attachment, even if BPF program itself is the same.
- * Expects BPF program context *ctx* as a first argument.
- *
- * Supported for the following program types:
- * - kprobe/uprobe;
- * - tracepoint;
- * - perf_event.
- *
- * Returns
- * Value specified by user at BPF link creation/attachment time
- * or 0, if it was not specified.
- */
-static __u64 (*bpf_get_attach_cookie)(void *ctx) = (void *) 174;
-
-/*
- * bpf_task_pt_regs
- *
- * Get the struct pt_regs associated with **task**.
- *
- * Returns
- * A pointer to struct pt_regs.
- */
-static long (*bpf_task_pt_regs)(struct task_struct *task) = (void *) 175;
-
-/*
- * bpf_get_branch_snapshot
- *
- * Get branch trace from hardware engines like Intel LBR. The
- * hardware engine is stopped shortly after the helper is
- * called. Therefore, the user need to filter branch entries
- * based on the actual use case. To capture branch trace
- * before the trigger point of the BPF program, the helper
- * should be called at the beginning of the BPF program.
- *
- * The data is stored as struct perf_branch_entry into output
- * buffer *entries*. *size* is the size of *entries* in bytes.
- * *flags* is reserved for now and must be zero.
- *
- *
- * Returns
- * On success, number of bytes written to *buf*. On error, a
- * negative value.
- *
- * **-EINVAL** if *flags* is not zero.
- *
- * **-ENOENT** if architecture does not support branch records.
- */
-static long (*bpf_get_branch_snapshot)(void *entries, __u32 size, __u64 flags) = (void *) 176;
-
-/*
- * bpf_trace_vprintk
- *
- * Behaves like **bpf_trace_printk**\ () helper, but takes an array of u64
- * to format and can handle more format args as a result.
- *
- * Arguments are to be used as in **bpf_seq_printf**\ () helper.
- *
- * Returns
- * The number of bytes written to the buffer, or a negative error
- * in case of failure.
- */
-static long (*bpf_trace_vprintk)(const char *fmt, __u32 fmt_size, const void *data, __u32 data_len) = (void *) 177;
-
-/*
- * bpf_skc_to_unix_sock
- *
- * Dynamically cast a *sk* pointer to a *unix_sock* pointer.
- *
- * Returns
- * *sk* if casting is valid, or **NULL** otherwise.
- */
-static struct unix_sock *(*bpf_skc_to_unix_sock)(void *sk) = (void *) 178;
-
-/*
- * bpf_kallsyms_lookup_name
- *
- * Get the address of a kernel symbol, returned in *res*. *res* is
- * set to 0 if the symbol is not found.
- *
- * Returns
- * On success, zero. On error, a negative value.
- *
- * **-EINVAL** if *flags* is not zero.
- *
- * **-EINVAL** if string *name* is not the same size as *name_sz*.
- *
- * **-ENOENT** if symbol is not found.
- *
- * **-EPERM** if caller does not have permission to obtain kernel address.
- */
-static long (*bpf_kallsyms_lookup_name)(const char *name, int name_sz, int flags, __u64 *res) = (void *) 179;
-
-/*
- * bpf_find_vma
- *
- * Find vma of *task* that contains *addr*, call *callback_fn*
- * function with *task*, *vma*, and *callback_ctx*.
- * The *callback_fn* should be a static function and
- * the *callback_ctx* should be a pointer to the stack.
- * The *flags* is used to control certain aspects of the helper.
- * Currently, the *flags* must be 0.
- *
- * The expected callback signature is
- *
- * long (\*callback_fn)(struct task_struct \*task, struct vm_area_struct \*vma, void \*callback_ctx);
- *
- *
- * Returns
- * 0 on success.
- * **-ENOENT** if *task->mm* is NULL, or no vma contains *addr*.
- * **-EBUSY** if failed to try lock mmap_lock.
- * **-EINVAL** for invalid **flags**.
- */
-static long (*bpf_find_vma)(struct task_struct *task, __u64 addr, void *callback_fn, void *callback_ctx, __u64 flags) = (void *) 180;
-
-/*
- * bpf_loop
- *
- * For **nr_loops**, call **callback_fn** function
- * with **callback_ctx** as the context parameter.
- * The **callback_fn** should be a static function and
- * the **callback_ctx** should be a pointer to the stack.
- * The **flags** is used to control certain aspects of the helper.
- * Currently, the **flags** must be 0. Currently, nr_loops is
- * limited to 1 << 23 (~8 million) loops.
- *
- * long (\*callback_fn)(u32 index, void \*ctx);
- *
- * where **index** is the current index in the loop. The index
- * is zero-indexed.
- *
- * If **callback_fn** returns 0, the helper will continue to the next
- * loop. If return value is 1, the helper will skip the rest of
- * the loops and return. Other return values are not used now,
- * and will be rejected by the verifier.
- *
- *
- * Returns
- * The number of loops performed, **-EINVAL** for invalid **flags**,
- * **-E2BIG** if **nr_loops** exceeds the maximum number of loops.
- */
-static long (*bpf_loop)(__u32 nr_loops, void *callback_fn, void *callback_ctx, __u64 flags) = (void *) 181;
-
-/*
- * bpf_strncmp
- *
- * Do strncmp() between **s1** and **s2**. **s1** doesn't need
- * to be null-terminated and **s1_sz** is the maximum storage
- * size of **s1**. **s2** must be a read-only string.
- *
- * Returns
- * An integer less than, equal to, or greater than zero
- * if the first **s1_sz** bytes of **s1** is found to be
- * less than, to match, or be greater than **s2**.
- */
-static long (*bpf_strncmp)(const char *s1, __u32 s1_sz, const char *s2) = (void *) 182;
-
-/*
- * bpf_get_func_arg
- *
- * Get **n**-th argument (zero based) of the traced function (for tracing programs)
- * returned in **value**.
- *
- *
- * Returns
- * 0 on success.
- * **-EINVAL** if n >= arguments count of traced function.
- */
-static long (*bpf_get_func_arg)(void *ctx, __u32 n, __u64 *value) = (void *) 183;
-
-/*
- * bpf_get_func_ret
- *
- * Get return value of the traced function (for tracing programs)
- * in **value**.
- *
- *
- * Returns
- * 0 on success.
- * **-EOPNOTSUPP** for tracing programs other than BPF_TRACE_FEXIT or BPF_MODIFY_RETURN.
- */
-static long (*bpf_get_func_ret)(void *ctx, __u64 *value) = (void *) 184;
-
-/*
- * bpf_get_func_arg_cnt
- *
- * Get number of arguments of the traced function (for tracing programs).
- *
- *
- * Returns
- * The number of arguments of the traced function.
- */
-static long (*bpf_get_func_arg_cnt)(void *ctx) = (void *) 185;
-
-/*
- * bpf_get_retval
- *
- * Get the syscall's return value that will be returned to userspace.
- *
- * This helper is currently supported by cgroup programs only.
- *
- * Returns
- * The syscall's return value.
- */
-static int (*bpf_get_retval)(void) = (void *) 186;
-
-/*
- * bpf_set_retval
- *
- * Set the syscall's return value that will be returned to userspace.
- *
- * This helper is currently supported by cgroup programs only.
- *
- * Returns
- * 0 on success, or a negative error in case of failure.
- */
-static int (*bpf_set_retval)(int retval) = (void *) 187;
-
-/*
- * bpf_xdp_get_buff_len
- *
- * Get the total size of a given xdp buff (linear and paged area)
- *
- * Returns
- * The total size of a given xdp buffer.
- */
-static __u64 (*bpf_xdp_get_buff_len)(struct xdp_md *xdp_md) = (void *) 188;
-
-/*
- * bpf_xdp_load_bytes
- *
- * This helper is provided as an easy way to load data from a
- * xdp buffer. It can be used to load *len* bytes from *offset* from
- * the frame associated to *xdp_md*, into the buffer pointed by
- * *buf*.
- *
- * Returns
- * 0 on success, or a negative error in case of failure.
- */
-static long (*bpf_xdp_load_bytes)(struct xdp_md *xdp_md, __u32 offset, void *buf, __u32 len) = (void *) 189;
-
-/*
- * bpf_xdp_store_bytes
- *
- * Store *len* bytes from buffer *buf* into the frame
- * associated to *xdp_md*, at *offset*.
- *
- * Returns
- * 0 on success, or a negative error in case of failure.
- */
-static long (*bpf_xdp_store_bytes)(struct xdp_md *xdp_md, __u32 offset, void *buf, __u32 len) = (void *) 190;
-
-/*
- * bpf_copy_from_user_task
- *
- * Read *size* bytes from user space address *user_ptr* in *tsk*'s
- * address space, and stores the data in *dst*. *flags* is not
- * used yet and is provided for future extensibility. This helper
- * can only be used by sleepable programs.
- *
- * Returns
- * 0 on success, or a negative error in case of failure. On error
- * *dst* buffer is zeroed out.
- */
-static long (*bpf_copy_from_user_task)(void *dst, __u32 size, const void *user_ptr, struct task_struct *tsk, __u64 flags) = (void *) 191;
-
-/*
- * bpf_skb_set_tstamp
- *
- * Change the __sk_buff->tstamp_type to *tstamp_type*
- * and set *tstamp* to the __sk_buff->tstamp together.
- *
- * If there is no need to change the __sk_buff->tstamp_type,
- * the tstamp value can be directly written to __sk_buff->tstamp
- * instead.
- *
- * BPF_SKB_TSTAMP_DELIVERY_MONO is the only tstamp that
- * will be kept during bpf_redirect_*(). A non zero
- * *tstamp* must be used with the BPF_SKB_TSTAMP_DELIVERY_MONO
- * *tstamp_type*.
- *
- * A BPF_SKB_TSTAMP_UNSPEC *tstamp_type* can only be used
- * with a zero *tstamp*.
- *
- * Only IPv4 and IPv6 skb->protocol are supported.
- *
- * This function is most useful when it needs to set a
- * mono delivery time to __sk_buff->tstamp and then
- * bpf_redirect_*() to the egress of an iface. For example,
- * changing the (rcv) timestamp in __sk_buff->tstamp at
- * ingress to a mono delivery time and then bpf_redirect_*()
- * to sch_fq@phy-dev.
- *
- * Returns
- * 0 on success.
- * **-EINVAL** for invalid input
- * **-EOPNOTSUPP** for unsupported protocol
- */
-static long (*bpf_skb_set_tstamp)(struct __sk_buff *skb, __u64 tstamp, __u32 tstamp_type) = (void *) 192;
-
-/*
- * bpf_ima_file_hash
- *
- * Returns a calculated IMA hash of the *file*.
- * If the hash is larger than *size*, then only *size*
- * bytes will be copied to *dst*
- *
- * Returns
- * The **hash_algo** is returned on success,
- * **-EOPNOTSUP** if the hash calculation failed or **-EINVAL** if
- * invalid arguments are passed.
- */
-static long (*bpf_ima_file_hash)(struct file *file, void *dst, __u32 size) = (void *) 193;
-
-/*
- * bpf_kptr_xchg
- *
- * Exchange kptr at pointer *map_value* with *ptr*, and return the
- * old value. *ptr* can be NULL, otherwise it must be a referenced
- * pointer which will be released when this helper is called.
- *
- * Returns
- * The old value of kptr (which can be NULL). The returned pointer
- * if not NULL, is a reference which must be released using its
- * corresponding release function, or moved into a BPF map before
- * program exit.
- */
-static void *(*bpf_kptr_xchg)(void *map_value, void *ptr) = (void *) 194;
-
-/*
- * bpf_map_lookup_percpu_elem
- *
- * Perform a lookup in *percpu map* for an entry associated to
- * *key* on *cpu*.
- *
- * Returns
- * Map value associated to *key* on *cpu*, or **NULL** if no entry
- * was found or *cpu* is invalid.
- */
-static void *(*bpf_map_lookup_percpu_elem)(void *map, const void *key, __u32 cpu) = (void *) 195;
-
+static int (*bpf_sk_original_addr)(void *bpf_socket, int optname, char *optval, int optlen) = (void *) 157;

--
2.33.0
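For reference, here is a minimal sketch (not part of the patch) of how a sockops program might consume the new bpf_get_sockops_uid_gid helper once the patched bpf_helper_defs.h is installed. It assumes a kernel that actually implements helper ID 156 (vanilla upstream kernels do not); the program and section names are illustrative only.

// SPDX-License-Identifier: LGPL-2.1 OR BSD-2-Clause
/* Illustrative sketch: needs the patched bpf_helper_defs.h shipped above and
 * a kernel that implements helper ID 156; upstream kernels will reject it. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sockops")
int log_owner(struct bpf_sock_ops *skops)
{
	if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB) {
		/* per the helper comment: gid in the upper 32 bits, uid in the lower 32 */
		__u64 uid_gid = bpf_get_sockops_uid_gid(skops);
		__u32 uid = (__u32)uid_gid;
		__u32 gid = (__u32)(uid_gid >> 32);

		bpf_printk("connection owned by uid=%u gid=%u", uid, gid);
	}
	return 1;
}

char LICENSE[] SEC("license") = "Dual BSD/GPL";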
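A similarly hedged sketch for the second new helper, bpf_sk_original_addr, which the patch documents as returning the IPv4 original destination or reply source. The optname value (SO_ORIGINAL_DST, 80, taken from <linux/netfilter_ipv4.h>) and the use of a sockaddr_in-shaped buffer are assumptions about the kernel side of this helper, not something the patch itself defines.

// SPDX-License-Identifier: LGPL-2.1 OR BSD-2-Clause
/* Illustrative sketch: assumes the patched headers plus a kernel implementing
 * helper ID 157, and that the helper accepts SO_ORIGINAL_DST-style optnames. */
#include <linux/bpf.h>
#include <linux/in.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

#define SO_ORIGINAL_DST 80	/* from <linux/netfilter_ipv4.h>; assumed to be accepted here */

SEC("sockops")
int log_orig_dst(struct bpf_sock_ops *skops)
{
	struct sockaddr_in addr = {};

	if (skops->op != BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB)
		return 1;

	/* 0 on success, negative error otherwise (per the helper comment above) */
	if (bpf_sk_original_addr(skops, SO_ORIGINAL_DST, (char *)&addr, sizeof(addr)) == 0)
		bpf_printk("pre-DNAT dst 0x%x:%u",
			   bpf_ntohl(addr.sin_addr.s_addr),
			   bpf_ntohs(addr.sin_port));
	return 1;
}

char LICENSE[] SEC("license") = "Dual BSD/GPL";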
@@ -4,7 +4,7 @@

Name: %{githubname}
Version: %{githubver}
Release: 9
Release: 10
Summary: Libbpf library

License: LGPLv2 or BSD
@@ -33,6 +33,7 @@ Patch0016: backport-libbpf-Use-correct-return-pointer-in-attach_raw_tp.patc
Patch0017: backport-libbpf-Use-elf_getshdrnum-instead-of-e_shnum.patch
Patch0018: backport-libbpf-Ensure-FD-3-during-bpf_map__reuse_fd.patch
Patch0019: backport-libbpf-Ensure-libbpf-always-opens-files-with-O_CLOEX.patch
Patch0020: 0001-remove-nonexistent-helper-functions-and-add-new-help.patch

# This package supersedes libbpf from kernel-tools,
# which has default Epoch: 0. By having Epoch: 1
@@ -85,6 +86,10 @@ developing applications that use %{name}
%{_libdir}/libbpf.a

%changelog
* Fri Dec 1 2023 liningjie <liningjie@xfusion.com> 2:0.8.1-10
- backport patches from openEuler-22.03-LTS-SP2:
0001-remove-nonexistent-helper-functions-and-add-new-help.patch

* Mon Aug 14 2023 zhangmingyi <zhangmingyi5@huawei.com> 2:0.8.1-9
- backport patches from upstream:
backport-libbpf-Ensure-FD-3-during-bpf_map__reuse_fd.patch