author     Catalin Marinas   2020-07-31 18:09:57 +0100
committer  Catalin Marinas   2020-07-31 18:09:57 +0100
commit     0e4cd9f2654915be8d09a1bd1b405ce5426e64c4 (patch)
tree       0aeb119873c87509ff54cbc9c6946b271f3780bf /tools
parent     18aa3bd58b1428d1927fe11f85ad444423d4fc59 (diff)
parent     5f1f7f6c205a2e7f1d92229ac358254bd2826c2d (diff)
Merge branch 'for-next/read-barrier-depends' into for-next/core
* for-next/read-barrier-depends:
  : Allow architectures to override __READ_ONCE()
  arm64: Reduce the number of header files pulled into vmlinux.lds.S
  compiler.h: Move compiletime_assert() macros into compiler_types.h
  checkpatch: Remove checks relating to [smp_]read_barrier_depends()
  include/linux: Remove smp_read_barrier_depends() from comments
  tools/memory-model: Remove smp_read_barrier_depends() from informal doc
  Documentation/barriers/kokr: Remove references to [smp_]read_barrier_depends()
  Documentation/barriers: Remove references to [smp_]read_barrier_depends()
  locking/barriers: Remove definitions for [smp_]read_barrier_depends()
  alpha: Replace smp_read_barrier_depends() usage with smp_[r]mb()
  vhost: Remove redundant use of read_barrier_depends() barrier
  asm/rwonce: Don't pull <asm/barrier.h> into 'asm-generic/rwonce.h'
  asm/rwonce: Remove smp_read_barrier_depends() invocation
  alpha: Override READ_ONCE() with barriered implementation
  asm/rwonce: Allow __READ_ONCE to be overridden by the architecture
  compiler.h: Split {READ,WRITE}_ONCE definitions out into rwonce.h
  tools: bpf: Use local copy of headers including uapi/linux/filter.h
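The headline change in this branch is the hook that lets an architecture supply its own __READ_ONCE(). As a rough sketch of the pattern (simplified from asm-generic/rwonce.h; the real macro strips qualifiers with __unqual_scalar_typeof() and adds a compile-time type check, both omitted here):

    /* <asm-generic/rwonce.h>, simplified sketch: an architecture may define
     * __READ_ONCE() in its own <asm/rwonce.h>; every other architecture
     * falls back to a plain volatile load with no trailing barrier. */
    #ifndef __READ_ONCE
    #define __READ_ONCE(x)  (*(const volatile typeof(x) *)&(x))
    #endif

    #define READ_ONCE(x)    __READ_ONCE(x)

Only Alpha overrides the hook (see the sketch at the end of this page), which is what allows the generic smp_read_barrier_depends() machinery to be removed.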
Diffstat (limited to 'tools')
-rw-r--r--  tools/bpf/Makefile                                  3
-rw-r--r--  tools/include/uapi/linux/filter.h                  90
-rw-r--r--  tools/memory-model/Documentation/explanation.txt   26
3 files changed, 104 insertions, 15 deletions
diff --git a/tools/bpf/Makefile b/tools/bpf/Makefile
index 6df1850f8353..8a69258fd8aa 100644
--- a/tools/bpf/Makefile
+++ b/tools/bpf/Makefile
@@ -9,7 +9,8 @@ MAKE = make
INSTALL ?= install
CFLAGS += -Wall -O2
-CFLAGS += -D__EXPORTED_HEADERS__ -I$(srctree)/include/uapi -I$(srctree)/include
+CFLAGS += -D__EXPORTED_HEADERS__ -I$(srctree)/tools/include/uapi \
+ -I$(srctree)/tools/include
# This will work when bpf is built in tools env. where srctree
# isn't set and when invoked from selftests build, where srctree
diff --git a/tools/include/uapi/linux/filter.h b/tools/include/uapi/linux/filter.h
new file mode 100644
index 000000000000..eaef459e7bd4
--- /dev/null
+++ b/tools/include/uapi/linux/filter.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Linux Socket Filter Data Structures
+ */
+
+#ifndef __LINUX_FILTER_H__
+#define __LINUX_FILTER_H__
+
+
+#include <linux/types.h>
+#include <linux/bpf_common.h>
+
+/*
+ * Current version of the filter code architecture.
+ */
+#define BPF_MAJOR_VERSION 1
+#define BPF_MINOR_VERSION 1
+
+/*
+ * Try and keep these values and structures similar to BSD, especially
+ * the BPF code definitions which need to match so you can share filters
+ */
+
+struct sock_filter { /* Filter block */
+ __u16 code; /* Actual filter code */
+ __u8 jt; /* Jump true */
+ __u8 jf; /* Jump false */
+ __u32 k; /* Generic multiuse field */
+};
+
+struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
+ unsigned short len; /* Number of filter blocks */
+ struct sock_filter *filter;
+};
+
+/* ret - BPF_K and BPF_X also apply */
+#define BPF_RVAL(code) ((code) & 0x18)
+#define BPF_A 0x10
+
+/* misc */
+#define BPF_MISCOP(code) ((code) & 0xf8)
+#define BPF_TAX 0x00
+#define BPF_TXA 0x80
+
+/*
+ * Macros for filter block array initializers.
+ */
+#ifndef BPF_STMT
+#define BPF_STMT(code, k) { (unsigned short)(code), 0, 0, k }
+#endif
+#ifndef BPF_JUMP
+#define BPF_JUMP(code, k, jt, jf) { (unsigned short)(code), jt, jf, k }
+#endif
+
+/*
+ * Number of scratch memory words for: BPF_ST and BPF_STX
+ */
+#define BPF_MEMWORDS 16
+
+/* RATIONALE. Negative offsets are invalid in BPF.
+ We use them to reference ancillary data.
+ Unlike introducing new instructions, it does not break
+ existing compilers/optimizers.
+ */
+#define SKF_AD_OFF (-0x1000)
+#define SKF_AD_PROTOCOL 0
+#define SKF_AD_PKTTYPE 4
+#define SKF_AD_IFINDEX 8
+#define SKF_AD_NLATTR 12
+#define SKF_AD_NLATTR_NEST 16
+#define SKF_AD_MARK 20
+#define SKF_AD_QUEUE 24
+#define SKF_AD_HATYPE 28
+#define SKF_AD_RXHASH 32
+#define SKF_AD_CPU 36
+#define SKF_AD_ALU_XOR_X 40
+#define SKF_AD_VLAN_TAG 44
+#define SKF_AD_VLAN_TAG_PRESENT 48
+#define SKF_AD_PAY_OFFSET 52
+#define SKF_AD_RANDOM 56
+#define SKF_AD_VLAN_TPID 60
+#define SKF_AD_MAX 64
+
+#define SKF_NET_OFF (-0x100000)
+#define SKF_LL_OFF (-0x200000)
+
+#define BPF_NET_OFF SKF_NET_OFF
+#define BPF_LL_OFF SKF_LL_OFF
+
+#endif /* __LINUX_FILTER_H__ */
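For orientation, a minimal userspace sketch of how this header is typically used: build a classic BPF program from the BPF_STMT()/BPF_JUMP() initializers, wrap it in a struct sock_fprog, and attach it with SO_ATTACH_FILTER. Opening an AF_PACKET socket needs CAP_NET_RAW, and error handling is trimmed to the essentials:

    #include <stdio.h>
    #include <arpa/inet.h>
    #include <sys/socket.h>
    #include <linux/if_ether.h>
    #include <linux/filter.h>

    int main(void)
    {
            /* Classic BPF: accept only IPv4 frames, drop everything else. */
            struct sock_filter insns[] = {
                    BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),              /* A = EtherType */
                    BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ETH_P_IP, 0, 1), /* IPv4? */
                    BPF_STMT(BPF_RET | BPF_K, 0xffffffff),               /* yes: accept */
                    BPF_STMT(BPF_RET | BPF_K, 0),                        /* no: drop */
            };
            struct sock_fprog prog = {
                    .len    = sizeof(insns) / sizeof(insns[0]),
                    .filter = insns,
            };
            int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

            if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
                                     &prog, sizeof(prog)) < 0) {
                    perror("attach filter");
                    return 1;
            }
            /* Reads from fd now see only IPv4 packets. */
            return 0;
    }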
diff --git a/tools/memory-model/Documentation/explanation.txt b/tools/memory-model/Documentation/explanation.txt
index e91a2eb19592..01adf9e0ebac 100644
--- a/tools/memory-model/Documentation/explanation.txt
+++ b/tools/memory-model/Documentation/explanation.txt
@@ -1122,12 +1122,10 @@ maintain at least the appearance of FIFO order.
In practice, this difficulty is solved by inserting a special fence
between P1's two loads when the kernel is compiled for the Alpha
architecture. In fact, as of version 4.15, the kernel automatically
-adds this fence (called smp_read_barrier_depends() and defined as
-nothing at all on non-Alpha builds) after every READ_ONCE() and atomic
-load. The effect of the fence is to cause the CPU not to execute any
-po-later instructions until after the local cache has finished
-processing all the stores it has already received. Thus, if the code
-was changed to:
+adds this fence after every READ_ONCE() and atomic load on Alpha. The
+effect of the fence is to cause the CPU not to execute any po-later
+instructions until after the local cache has finished processing all
+the stores it has already received. Thus, if the code was changed to:
P1()
{
@@ -1146,14 +1144,14 @@ READ_ONCE() or another synchronization primitive rather than accessed
directly.
The LKMM requires that smp_rmb(), acquire fences, and strong fences
-share this property with smp_read_barrier_depends(): They do not allow
-the CPU to execute any po-later instructions (or po-later loads in the
-case of smp_rmb()) until all outstanding stores have been processed by
-the local cache. In the case of a strong fence, the CPU first has to
-wait for all of its po-earlier stores to propagate to every other CPU
-in the system; then it has to wait for the local cache to process all
-the stores received as of that time -- not just the stores received
-when the strong fence began.
+share this property: They do not allow the CPU to execute any po-later
+instructions (or po-later loads in the case of smp_rmb()) until all
+outstanding stores have been processed by the local cache. In the
+case of a strong fence, the CPU first has to wait for all of its
+po-earlier stores to propagate to every other CPU in the system; then
+it has to wait for the local cache to process all the stores received
+as of that time -- not just the stores received when the strong fence
+began.
And of course, none of this matters for any architecture other than
Alpha.
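For reference, the shape of the Alpha override added by this series (a simplified sketch of arch/alpha/include/asm/rwonce.h; the real version is only active on SMP builds and uses __unqual_scalar_typeof() for the temporary):

    /* Alpha only: a full memory barrier after the load keeps the CPU from
     * executing po-later instructions until the local cache has processed
     * all the stores it has already received. */
    #define __READ_ONCE(x)                                          \
    ({                                                              \
            typeof(x) __x = *(volatile typeof(x) *)&(x);            \
            mb();                                                   \
            __x;                                                    \
    })

This is the fence the updated text above refers to: every READ_ONCE() on Alpha now carries it, so the documentation no longer needs to mention smp_read_barrier_depends() by name.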