Newly added Android kernel BPF helpers are not detected
0 votes, 1 answer, 111 views
I'm trying to patch an Android 4.9 kernel to support the probe_read_{user, kernel} and probe_read_{user, kernel}_str helpers. For the backport I used another patch, the one that adds the bpf_probe_read_str helper, as an example. Although I've patched the kernel to add the helpers, when I run bpftrace --info the str helper shows up but the newly added ones don't.
bpftrace --info output:
System
OS: Linux 4.9.337-g4fcceb75c5cd #1 SMP PREEMPT Sat May 18 17:26:12 EEST 2024
Arch: aarch64
Build
version: v0.19.1
LLVM: 14.0.6
unsafe probe: yes
bfd: no
libdw (DWARF support): no
libbpf: failed to find valid kernel BTF
Kernel helpers
probe_read: yes
probe_read_str: yes
probe_read_user: no
probe_read_user_str: no
probe_read_kernel: no
probe_read_kernel_str: no
get_current_cgroup_id: no
send_signal: no
override_return: no
get_boot_ns: no
dpath: no
skboutput: no
get_tai_ns: no
get_func_ip: no
Kernel features
Instruction limit: -1
Loop support: no
btf: no
module btf: no
map batch: no
uprobe refcount (depends on Build:bcc bpf_attach_uprobe refcount): no
Map types
hash: yes
percpu hash: yes
array: yes
percpu array: yes
stack_trace: yes
perf_event_array: yes
ringbuf: no
Probe types
kprobe: yes
tracepoint: yes
perf_event: yes
kfunc: no
kprobe_multi: no
raw_tp_special: no
iter: no
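For context, bpftrace's --info helper detection works (roughly) by trial-loading a tiny BPF program that calls the helper in question and checking whether the verifier accepts the call; if the kernel's tracing_func_proto() returns NULL for that helper ID, the load fails and the helper is reported as "no". Below is only a hedged sketch of that idea, not bpftrace's actual code; the program type, stack setup, and helper arguments are my assumptions:

/* Hedged sketch of bpftrace-style helper detection: trial-load a tiny
 * BPF program that calls the helper by numeric ID. Compile with gcc and
 * run as root on the target device. Not bpftrace's real implementation. */
#include <linux/bpf.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Program: r1 = fp-8 (dst), r2 = 8 (size), r3 = 0 (src), call <id>, r0 = 0, exit */
static int helper_loads(int func_id)
{
        struct bpf_insn insns[] = {
                { .code = BPF_ALU64 | BPF_MOV | BPF_X, .dst_reg = 1, .src_reg = 10 },
                { .code = BPF_ALU64 | BPF_ADD | BPF_K, .dst_reg = 1, .imm = -8 },
                { .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = 2, .imm = 8 },
                { .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = 3, .imm = 0 },
                { .code = BPF_JMP | BPF_CALL, .imm = func_id },
                { .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = 0, .imm = 0 },
                { .code = BPF_JMP | BPF_EXIT },
        };
        char log[4096] = "";
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        /* TRACEPOINT avoids the kern_version check that KPROBE programs need on 4.9 */
        attr.prog_type = BPF_PROG_TYPE_TRACEPOINT;
        attr.insn_cnt  = sizeof(insns) / sizeof(insns[0]);
        attr.insns     = (uint64_t)(unsigned long)insns;
        attr.license   = (uint64_t)(unsigned long)"GPL";
        attr.log_buf   = (uint64_t)(unsigned long)log;
        attr.log_size  = sizeof(log);
        attr.log_level = 1;

        int fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
        if (fd >= 0)
                close(fd);
        else
                fprintf(stderr, "helper id %d rejected: %s\n", func_id, log);
        return fd >= 0;
}

int main(void)
{
        /* BPF_FUNC_probe_read_user must be defined by the headers used to
         * build this tool, and the call only succeeds when that numeric ID
         * matches an ID the *running* kernel actually dispatches. */
        printf("probe_read_user: %s\n",
               helper_loads(BPF_FUNC_probe_read_user) ? "yes" : "no");
        return 0;
}

The call instruction carries nothing but the numeric helper ID, so detection only reports "yes" when the ID emitted by the loader matches one the running kernel handles in its func_proto table.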
My probe_read_{user, kernel} and probe_read_{user, kernel}_str backport patch:
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index cc5ba47062e8..48762ecbfd66 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -88,6 +88,7 @@ static inline unsigned long __copy_from_user_nocache(void *to,
* happens, handle that and return -EFAULT.
*/
extern long probe_kernel_read(void *dst, const void *src, size_t size);
+extern long probe_kernel_read_strict(void *dst, const void *src, size_t size);
extern long __probe_kernel_read(void *dst, const void *src, size_t size);
/*
@@ -126,6 +127,8 @@ extern long notrace probe_user_write(void __user *dst, const void *src, size_t s
extern long notrace __probe_user_write(void __user *dst, const void *src, size_t size);
extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
+long strncpy_from_unsafe_strict(char *dst, const void *unsafe_addr,
+ long count);
extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr,
long count);
extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count);
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 67d7d771a944..d1036b0ba1fa 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -552,6 +552,11 @@ enum bpf_func_id {
*/
BPF_FUNC_get_socket_uid,
+ BPF_FUNC_probe_read_user,
+ BPF_FUNC_probe_read_kernel,
+ BPF_FUNC_probe_read_user_str,
+ BPF_FUNC_probe_read_kernel_str,
+
__BPF_FUNC_MAX_ID,
};
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 83b20092b84c..e872ab1fb235 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -85,44 +85,6 @@ BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
return ret;
}
-static const struct bpf_func_proto bpf_probe_read_proto = {
- .func = bpf_probe_read,
- .gpl_only = true,
- .ret_type = RET_INTEGER,
- .arg1_type = ARG_PTR_TO_RAW_STACK,
- .arg2_type = ARG_CONST_STACK_SIZE,
- .arg3_type = ARG_ANYTHING,
-};
-
-BPF_CALL_3(bpf_probe_read_str, void *, dst, u32, size, const void *, unsafe_ptr)
-{
- int ret;
-
- /*
- * The strncpy_from_unsafe() call will likely not fill the entire
- * buffer, but that's okay in this circumstance as we're probing
- * arbitrary memory anyway similar to bpf_probe_read() and might
- * as well probe the stack. Thus, memory is explicitly cleared
- * only in error case, so that improper users ignoring return
- * code altogether don't copy garbage; otherwise length of string
- * is returned that can be used for bpf_perf_event_output() et al.
- */
- ret = strncpy_from_unsafe(dst, unsafe_ptr, size);
- if (unlikely(ret < 0))
- memset(dst, 0, size);
-
- return ret;
-}
-
-static const struct bpf_func_proto bpf_probe_read_str_proto = {
- .func = bpf_probe_read_str,
- .gpl_only = true,
- .ret_type = RET_INTEGER,
- .arg1_type = ARG_PTR_TO_RAW_STACK,
- .arg2_type = ARG_CONST_STACK_SIZE,
- .arg3_type = ARG_ANYTHING,
-};
-
BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
u32, size)
{
@@ -465,6 +427,139 @@ static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
.arg2_type = ARG_ANYTHING,
};
+BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
+ const void __user *, unsafe_ptr)
+{
+ int ret = probe_user_read(dst, unsafe_ptr, size);
+
+ if (unlikely(ret < 0))
+ memset(dst, 0, size);
+
+ return ret;
+}
+
+static const struct bpf_func_proto bpf_probe_read_user_proto = {
+ .func = bpf_probe_read_user,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_RAW_STACK,
+ .arg2_type = ARG_CONST_STACK_SIZE,
+ .arg3_type = ARG_ANYTHING,
+};
+
+BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
+ const void __user *, unsafe_ptr)
+{
+ int ret = strncpy_from_unsafe_user(dst, unsafe_ptr, size);
+
+ if (unlikely(ret < 0))
+ memset(dst, 0, size);
+
+ return ret;
+}
+
+static const struct bpf_func_proto bpf_probe_read_user_str_proto = {
+ .func = bpf_probe_read_user_str,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_RAW_STACK,
+ .arg2_type = ARG_CONST_STACK_SIZE,
+ .arg3_type = ARG_ANYTHING,
+};
+
+static __always_inline int
+bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr,
+ const bool compat)
+{
+ int ret;
+ ret = compat ? probe_kernel_read(dst, unsafe_ptr, size) :
+ probe_kernel_read_strict(dst, unsafe_ptr, size);
+ if (unlikely(ret < 0))
+ memset(dst, 0, size);
+ return ret;
+}
+
+BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
+ const void *, unsafe_ptr)
+{
+ return bpf_probe_read_kernel_common(dst, size, unsafe_ptr, false);
+}
+
+static const struct bpf_func_proto bpf_probe_read_kernel_proto = {
+ .func = bpf_probe_read_kernel,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_RAW_STACK,
+ .arg2_type = ARG_CONST_STACK_SIZE,
+ .arg3_type = ARG_ANYTHING,
+};
+
+BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
+ const void *, unsafe_ptr)
+{
+ return bpf_probe_read_kernel_common(dst, size, unsafe_ptr, true);
+}
+
+static const struct bpf_func_proto bpf_probe_read_compat_proto = {
+ .func = bpf_probe_read_compat,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_RAW_STACK,
+ .arg2_type = ARG_CONST_STACK_SIZE,
+ .arg3_type = ARG_ANYTHING,
+};
+
+static __always_inline int
+bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr,
+ const bool compat)
+{
+ int ret;
+ /*
+ * The strncpy_from_unsafe_*() call will likely not fill the entire
+ * buffer, but that's okay in this circumstance as we're probing
+ * arbitrary memory anyway similar to bpf_probe_read_*() and might
+ * as well probe the stack. Thus, memory is explicitly cleared
+ * only in error case, so that improper users ignoring return
+ * code altogether don't copy garbage; otherwise length of string
+ * is returned that can be used for bpf_perf_event_output() et al.
+ */
+ ret = compat ? strncpy_from_unsafe(dst, unsafe_ptr, size) :
+ strncpy_from_unsafe_strict(dst, unsafe_ptr, size);
+ if (unlikely(ret < 0))
+ memset(dst, 0, size);
+ return ret;
+}
+
+BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
+ const void *, unsafe_ptr)
+{
+ return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr, false);
+}
+
+static const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
+ .func = bpf_probe_read_kernel_str,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_RAW_STACK,
+ .arg2_type = ARG_CONST_STACK_SIZE,
+ .arg3_type = ARG_ANYTHING,
+};
+
+BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
+ const void *, unsafe_ptr)
+{
+ return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr, true);
+}
+
+static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
+ .func = bpf_probe_read_compat_str,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_RAW_STACK,
+ .arg2_type = ARG_CONST_STACK_SIZE,
+ .arg3_type = ARG_ANYTHING,
+};
+
static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
{
switch (func_id) {
@@ -474,10 +569,8 @@ static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
return &bpf_map_update_elem_proto;
case BPF_FUNC_map_delete_elem:
return &bpf_map_delete_elem_proto;
- case BPF_FUNC_probe_read:
- return &bpf_probe_read_proto;
case BPF_FUNC_probe_read_str:
- return &bpf_probe_read_str_proto;
+ return &bpf_probe_read_compat_str_proto;
case BPF_FUNC_ktime_get_ns:
return &bpf_ktime_get_ns_proto;
case BPF_FUNC_tail_call:
@@ -504,6 +597,17 @@ static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
return &bpf_current_task_under_cgroup_proto;
case BPF_FUNC_get_prandom_u32:
return &bpf_get_prandom_u32_proto;
+ case BPF_FUNC_probe_read_user:
+ return &bpf_probe_read_user_proto;
+ case BPF_FUNC_probe_read_kernel:
+ return &bpf_probe_read_kernel_proto;
+ case BPF_FUNC_probe_read:
+ return &bpf_probe_read_compat_proto;
+ case BPF_FUNC_probe_read_user_str:
+ return &bpf_probe_read_user_str_proto;
+ case BPF_FUNC_probe_read_kernel_str:
+ return &bpf_probe_read_kernel_str_proto;
+
default:
return NULL;
}
diff --git a/mm/maccess.c b/mm/maccess.c
index 03ea550f5a74..583935a288ad 100644
--- a/mm/maccess.c
+++ b/mm/maccess.c
@@ -47,6 +47,9 @@ probe_write_common(void __user *dst, const void *src, size_t size)
long __weak probe_kernel_read(void *dst, const void *src, size_t size)
__attribute__((alias("__probe_kernel_read")));
+long __weak probe_kernel_read_strict(void *dst, const void *src, size_t size)
+ __attribute__((alias("__probe_kernel_read")));
+
long __probe_kernel_read(void *dst, const void *src, size_t size)
{
long ret;
@@ -157,6 +160,10 @@ EXPORT_SYMBOL_GPL(probe_user_write);
* If @count is smaller than the length of the string, copies @count-1 bytes,
* sets the last byte of @dst buffer to NUL and returns @count.
*/
+long __weak strncpy_from_unsafe_strict(char *dst, const void *unsafe_addr,
+ long count)
+ __attribute__((alias("strncpy_from_unsafe")));
+
long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count)
{
mm_segment_t old_fs = get_fs();
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index a339bea1f4c8..e6caf916d217 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -516,6 +516,11 @@ enum bpf_func_id {
*/
BPF_FUNC_get_socket_uid,
+ BPF_FUNC_probe_read_user,
+ BPF_FUNC_probe_read_kernel,
+ BPF_FUNC_probe_read_user_str,
+ BPF_FUNC_probe_read_kernel_str,
+
__BPF_FUNC_MAX_ID,
};
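For completeness, here is how a raw BPF C program (built with something like clang -O2 -target bpf -c) could call the backported helper, in the pre-bpf_helpers.h style of the samples/bpf programs from this kernel vintage. This is a hedged sketch, not part of the patch: the kprobe symbol, section names, and probed address are placeholders, and it assumes the patched uapi header (the one defining BPF_FUNC_probe_read_user) is on the include path.

#include <linux/bpf.h>

/* samples/bpf-style helper declaration: the call is emitted with whatever
 * numeric value BPF_FUNC_probe_read_user has in the included header, so the
 * patched uapi header must be the one picked up at compile time. */
static long (*bpf_probe_read_user)(void *dst, __u32 size,
                                   const void *unsafe_ptr) =
        (void *) BPF_FUNC_probe_read_user;

struct pt_regs; /* opaque here; the register layout is not needed */

__attribute__((section("kprobe/sys_clone"), used))
int kprobe_sys_clone(struct pt_regs *ctx)
{
        char buf[16] = {};
        const void *user_ptr = (const void *) 0; /* placeholder address */

        /* Returns < 0 on fault; the helper zeroes dst in that case. */
        bpf_probe_read_user(buf, sizeof(buf), user_ptr);
        return 0;
}

char _license[] __attribute__((section("license"), used)) = "GPL";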
The reference patch ("bpf: add bpf_probe_read_str helper") that I used as an example:
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 83b20092b84c..59182e6d6f51 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -465,6 +465,36 @@ static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
.arg2_type = ARG_ANYTHING,
};
+BPF_CALL_3(bpf_probe_read_str, void *, dst, u32, size,
+ const void *, unsafe_ptr)
+{
+ int ret;
+
+ /*
+ * The strncpy_from_unsafe() call will likely not fill the entire
+ * buffer, but that's okay in this circumstance as we're probing
+ * arbitrary memory anyway similar to bpf_probe_read() and might
+ * as well probe the stack. Thus, memory is explicitly cleared
+ * only in error case, so that improper users ignoring return
+ * code altogether don't copy garbage; otherwise length of string
+ * is returned that can be used for bpf_perf_event_output() et al.
+ */
+ ret = strncpy_from_unsafe(dst, unsafe_ptr, size);
+ if (unlikely(ret < 0))
+ memset(dst, 0, size);
+
+ return ret;
+}
+
+static const struct bpf_func_proto bpf_probe_read_str_proto = {
+ .func = bpf_probe_read_str,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_RAW_STACK,
+ .arg2_type = ARG_CONST_STACK_SIZE,
+ .arg3_type = ARG_ANYTHING,
+};
+
static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
{
switch (func_id) {
@@ -504,6 +534,8 @@ static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
return &bpf_current_task_under_cgroup_proto;
case BPF_FUNC_get_prandom_u32:
return &bpf_get_prandom_u32_proto;
+ case BPF_FUNC_probe_read_str:
+ return &bpf_probe_read_str_proto;
default:
return NULL;
}
Asked by Marcel (1 rep) on May 18, 2024, 02:54 PM
Last activity: May 24, 2024, 04:52 PM